diff --git a/.testr.conf b/.testr.conf index 9ceb874d29..418e573987 100644 --- a/.testr.conf +++ b/.testr.conf @@ -2,6 +2,6 @@ test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \ OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \ OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \ - ${PYTHON:-python} -m subunit.run discover -t ./ ${OS_TEST_PATH:-./gbpservice/neutron/tests/unit} $LISTOPT $IDOPTION + ${PYTHON:-python} -m subunit.run discover -t ./ ${OS_TEST_PATH:-./gbpservice} $LISTOPT $IDOPTION test_id_option=--load-list $IDFILE test_list_option=--list diff --git a/devstack/README-NFP-Advanced-Mode b/devstack/README-NFP-Advanced-Mode index 73c42c8300..70440fd916 100644 --- a/devstack/README-NFP-Advanced-Mode +++ b/devstack/README-NFP-Advanced-Mode @@ -12,15 +12,22 @@ Fresh Installation Steps: (3) Configure local.conf # Modify NFP_DEVSTACK_MODE to 'advanced' - # Modify the GBPSERVICE_BRANCH to point to the top patch in devstack changeset refs/changes/05/335405 # Configure following external network details, EXT_NET_GATEWAY= EXT_NET_ALLOCATION_POOL_START= EXT_NET_ALLOCATION_POOL_END= EXT_NET_CIDR= - # Configure the following image paths, + # Configure the Configurator VM image path(optional), + # If configured, install step uploads the specified image + # If not configured, install step will build a new one and upload it + ConfiguratorQcow2Image= + # Configure the Service VM image paths(optional), + # If configured, install step uploads the specified images + # If not configured, install step ignores uploading these service images VyosQcow2Image= - Haproxy_LBaasV2_Qcow2Image= + HaproxyQcow2Image= + # Configure PUBLIC_INTERFACE with the public interface name + # Modify the GBPSERVICE_BRANCH to point to the top patch in devstack changeset refs/changes/05/335405 (4) Install devstack. 
# ./stack.sh diff --git a/devstack/Readme-NFP-install.txt b/devstack/Readme-NFP-install.txt index 13273df616..991fbb4e57 100644 --- a/devstack/Readme-NFP-install.txt +++ b/devstack/Readme-NFP-install.txt @@ -17,48 +17,25 @@ Fresh Installation Steps: * Advanced Mode Configuration: # Devstack installation in enterprise mode - Example: [[ $ENABLE_NFP = True ]] && NFP_DEVSTACK_MODE=advanced - # Change the value of GBPSERVICE_BRANCH to use different branch(in OC repo)/patch(in Openstack repo) - # External network details - EXT_NET_GATEWAY= - EXT_NET_ALLOCATION_POOL_START= - EXT_NET_ALLOCATION_POOL_END= - EXT_NET_CIDR= - # VyOS image path - * Available only at 192.168.100.135:/home/stack/service_images/vyos.qcow2 - Example: VyosQcow2Image=/home/stack/images/vyos.qcow2 - # Haproxy LBaaS V2 image path - Haproxy_LBaasV2_Qcow2Image= - # Public interface - Example: PUBLIC_INTERFACE=eth1 - - * Enterprise Mode Configuration: - # Devstack installation in enterprise mode - Example: [[ $ENABLE_NFP = True ]] && NFP_DEVSTACK_MODE=enterprise - # Change the value of GBPSERVICE_BRANCH to use different branch + NFP_DEVSTACK_MODE=advanced # External network details - EXT_NET_GATEWAY= - EXT_NET_ALLOCATION_POOL_START= - EXT_NET_ALLOCATION_POOL_END= - EXT_NET_CIDR= - # VyOS image path - * Available only at 192.168.100.135:/home/stack/service_images/vyos.qcow2 - Example: VyosQcow2Image=/home/stack/images/vyos.qcow2 - # Haproxy LBaaS V2 image path - Haproxy_LBaasV2_Qcow2Image= - # Public interface - Example: PUBLIC_INTERFACE=eth1 - # Visibility GIT Repository Credentials - GIT_ACCESS_USERNAME= - GIT_ACCESS_PASSWORD= - # Docker image path - * Available only at 192.168.100.50. 
Change and configure for different nework - DOCKER_IMAGES_URL=http://192.168.100.50/docker_images/ - # ASAv image path - AsavQcow2Image= - # PaloAlto image path - PaloAltoQcow2Image= - + EXT_NET_GATEWAY= + EXT_NET_ALLOCATION_POOL_START= + EXT_NET_ALLOCATION_POOL_END= + EXT_NET_CIDR= + # Configurator VM image path, its optional + # If configured, install step uploads the specified image + # If not configured, install step will build a new one and upload it + ConfiguratorQcow2Image= + # Service VM image paths, they are optional + # One can build service images referring to the section "Build service images". + # If configured, install step uploads the specified images + # If not configured, install step ignores uploading these service images. + VyosQcow2Image= + HaproxyQcow2Image= + # Public interface name + PUBLIC_INTERFACE= + # Change the value of GBPSERVICE_BRANCH to use different branch(in OC repo)/patch(in Openstack repo) (4) Run stack.sh from the /home/stack/devstack/ directory # ./stack.sh @@ -77,3 +54,36 @@ Re-installation Steps: (2) Follow the fresh installation steps + +Build service images: +==================== + +Steps to get the scripts to build images + # git clone -b mitaka_21st_march_base --single-branch https://github.com/oneconvergence/group-based-policy.git /home/stack/gbp_mitaka_21st_march_base + +Steps to setup the diskimage build + # sudo -H -E pip install -r /home/stack/gbp_mitaka_21st_march_base/gbpservice/tests/contrib/diskimage-create/requirements.txt + # sudo apt-get install -y --force-yes qemu-utils + # sudo apt-get install -y --force-yes dpkg-dev + +Steps to build VyOS service image: + # cd /home/stack/gbp_mitaka_21st_march_base/gbpservice/tests/contrib/diskimage-create/vyos/ + # sudo python vyos_image_create.py vyos_conf.json +Image location: + # /home/stack/gbp_mitaka_21st_march_base/gbpservice/tests/contrib/diskimage-create/vyos/output/vyos.qcow2 + +Steps to build Haproxy service image: + # cd 
/home/stack/gbp_mitaka_21st_march_base/gbpservice/tests/contrib/diskimage-create/ + # sudo python build_image.py haproxy_conf.json +Image location: + # /home/stack/gbp_mitaka_21st_march_base/gbpservice/tests/contrib/diskimage-create/output/haproxy.qcow2 + + +Upload service images: +===================== + +Steps to upload VyOS service image: + # glance image-create --name vyos --disk-format qcow2 --container-format bare --visibility public --file /home/stack/gbp_mitaka_21st_march_base/gbpservice/tests/contrib/diskimage-create/vyos/output/vyos.qcow2 + +Steps to upload Haproxy service image: + # glance image-create --name haproxy --disk-format qcow2 --container-format bare --visibility public --file /home/stack/gbp_mitaka_21st_march_base/gbpservice/tests/contrib/diskimage-create/output/haproxy.qcow2 diff --git a/devstack/lib/nfp b/devstack/lib/nfp index a0a45336f3..88bc95157d 100644 --- a/devstack/lib/nfp +++ b/devstack/lib/nfp @@ -1,182 +1,319 @@ -#!/bin/bash - -TOP_DIR=$PWD - +# lib/nfp +# functions - functions specific to nfp implementation + +# Dependencies: +# ``functions`` file +# ``DEST`` must be defined + +# ``stack.sh`` calls the entry points in this order: +# +# - prepare_nfp_image_builder +# - install_nfpgbpservice +# - init_nfpgbpservice +# - assign_user_role_credential +# - create_nfp_gbp_resources +# - create_nfp_image +# - launch_configuratorVM +# - copy_nfp_files_and_start_process +# +# ``unstack.sh`` calls the entry points in this order: + +# Set up default directories +DEVSTACK_DIR=$PWD NFPSERVICE_DIR=$DEST/gbp -DISK_IMAGE_DIR=$DEST/gbp/gbpservice/tests/contrib NEUTRON_CONF_DIR=/etc/neutron NEUTRON_CONF=$NEUTRON_CONF_DIR/neutron.conf NFP_CONF_DIR=/etc/nfp +DISKIMAGE_CREATE_DIR=$NFPSERVICE_DIR/gbpservice/tests/contrib/diskimage-create + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + +# Functions +# --------- +# prepare_nfp_image_builder() - Install the requirements for dib function prepare_nfp_image_builder { - sudo -H -E pip 
install -r $DISK_IMAGE_DIR/diskimage-create/requirements.txt + sudo -H -E pip install -r $DISKIMAGE_CREATE_DIR/requirements.txt sudo apt-get install -y --force-yes qemu-utils sudo apt-get install -y --force-yes dpkg-dev - if [[ $NFP_DEVSTACK_MODE != base ]]; then + if [[ $NFP_DEVSTACK_MODE = advanced ]]; then sudo wget -qO- https://get.docker.com/ | bash fi } -function create_port_for_vm { - if [[ $1 = configurator ]]; then - instance_name=$ConfiguratorInstanceName +# install_nfpgbpservice() - Collect source and prepare +function install_nfpgbpservice { + git_clone $GBPSERVICE_REPO $NFPSERVICE_DIR $GBPSERVICE_BRANCH + mv $NFPSERVICE_DIR/test-requirements.txt $NFPSERVICE_DIR/_test-requirements.txt + setup_develop $NFPSERVICE_DIR + mv -f $NEUTRON_CONF_DIR/policy.json $NEUTRON_CONF_DIR/policy.json.original 2>/dev/null; true + cp -f $NFPSERVICE_DIR/etc/policy.json $NEUTRON_CONF_DIR/policy.json + mv $NFPSERVICE_DIR/_test-requirements.txt $NFPSERVICE_DIR/test-requirements.txt +} + +# init_nfpgbpservice() - Initialize databases, etc. 
+function init_nfpgbpservice { + # Run GBP db migrations + gbp-db-manage --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE upgrade head + iniset $NEUTRON_CONF DEFAULT policy_dirs $NFP_CONF_DIR +} + +# assign_user_role_credential() - Assign Service role to the users +function assign_user_role_credential { + source $DEVSTACK_DIR/openrc admin admin + + serviceTenantID=`keystone tenant-list | grep "service" | awk '{print $2}'` + serviceRoleID=`keystone role-list | grep "service" | awk '{print $2}'` + adminRoleID=`keystone role-list | grep "admin" | awk '{print $2}'` + + keystone user-role-add\ + --user nova\ + --tenant $serviceTenantID\ + --role $serviceRoleID + + keystone user-role-add\ + --user neutron\ + --tenant $serviceTenantID\ + --role $adminRoleID +} + +# create_ext_net() - Create an external network +function create_ext_net { + source $DEVSTACK_DIR/stackrc + + EXT_NET_NAME=ext-net + EXT_NET_SUBNET_NAME=ext-net-subnet + EXT_NET_GATEWAY=$EXT_NET_GATEWAY + EXT_NET_ALLOCATION_POOL_START=$EXT_NET_ALLOCATION_POOL_START + EXT_NET_ALLOCATION_POOL_END=$EXT_NET_ALLOCATION_POOL_END + EXT_NET_CIDR=$EXT_NET_CIDR + + neutron net-create\ + --router:external=true\ + --shared\ + $EXT_NET_NAME + + neutron subnet-create\ + --ip_version 4\ + --gateway $EXT_NET_GATEWAY\ + --name $EXT_NET_SUBNET_NAME\ + --allocation-pool start=$EXT_NET_ALLOCATION_POOL_START,end=$EXT_NET_ALLOCATION_POOL_END\ + $EXT_NET_NAME\ + $EXT_NET_CIDR +} + +# create_ep_and_nsp() - Create GBP resources for the external netwrok +function create_ep_and_nsp { + subnet_id=`neutron net-list | grep "$EXT_NET_NAME" | awk '{print $6}'` + + gbp external-segment-create\ + --ip-version 4\ + --cidr $EXT_NET_CIDR\ + --external-route destination=0.0.0.0/0,nexthop=\ + --shared True\ + --subnet_id=$subnet_id\ + default + + gbp nat-pool-create\ + --ip-version 4\ + --ip-pool $EXT_NET_CIDR\ + --external-segment default\ + --shared True\ + default + + gbp nsp-create\ + --network-service-params 
type=ip_pool,name=vip_ip,value=nat_pool\ + svc_mgmt_fip_policy +} + +# create_nfp_gbp_resources() - Create various GBP resources +function create_nfp_gbp_resources { + source $DEVSTACK_DIR/openrc neutron service + unset OS_USER_DOMAIN_ID + unset OS_PROJECT_DOMAIN_ID + + if [[ $NFP_DEVSTACK_MODE = base ]]; then + + IMAGE_NAME="reference_configurator_image" + FLAVOR=m1.nfp-tiny + + gbp service-profile-create\ + --servicetype LOADBALANCER\ + --insertion-mode l3\ + --shared True\ + --service-flavor service_vendor=haproxy,device_type=None\ + --vendor NFP\ + base_mode_lb + + gbp service-profile-create\ + --servicetype FIREWALL\ + --insertion-mode l3\ + --shared True\ + --service-flavor service_vendor=nfp,device_type=nova,image_name=$IMAGE_NAME,flavor=$FLAVOR\ + --vendor NFP\ + base_mode_fw_vm + else - instance_name=$VisibilityInstanceName + + gbp service-profile-create\ + --servicetype LOADBALANCER\ + --insertion-mode l3\ + --shared True\ + --service-flavor service_vendor=haproxy,device_type=nova\ + --vendor NFP\ + lb_profile + + gbp service-profile-create\ + --servicetype LOADBALANCERV2\ + --insertion-mode l3\ + --shared True\ + --service-flavor service_vendor=haproxy_lbaasv2,device_type=nova,flavor=m1.small\ + --vendor NFP\ + lbv2_profile + + gbp service-profile-create\ + --servicetype FIREWALL\ + --insertion-mode l3\ + --shared True\ + --service-flavor service_vendor=vyos,device_type=nova\ + --vendor NFP\ + vyos_fw_profile + + gbp service-profile-create\ + --servicetype VPN\ + --insertion-mode l3\ + --shared True\ + --service-flavor service_vendor=vyos,device_type=nova\ + --vendor NFP\ + vpn_profile + + create_ext_net + create_ep_and_nsp + fi + + gbp l3policy-create\ + --ip-version 4\ + --proxy-ip-pool=192.169.0.0/24\ + --ip-pool 120.0.0.0/24\ + --subnet-prefix-length 24\ + service_management + + gbp l2policy-create\ + --l3-policy service_management\ + svc_management_ptg + + gbp group-create\ + svc_management_ptg\ + --service_management True\ + --l2-policy\ + 
svc_management_ptg + + neutron router-gateway-clear\ + l3p_service_management + + gbp l3policy-update\ + --external-segment ""\ + service_management +} + +# create_port_for_vm() - Create a port, and get its details +# Args: +# $1 - image_name +# $2 - instance name +function create_port_for_vm { GROUP="svc_management_ptg" - PortId=$(gbp policy-target-create --policy-target-group $GROUP $instance_name | grep port_id | awk '{print $4}') + PortId=$(gbp policy-target-create --policy-target-group $GROUP $2 | grep port_id | awk '{print $4}') IpAddr_extractor=`neutron port-list --format value | grep $PortId | awk '{print $7}'` IpAddr_purge_last=${IpAddr_extractor::-1} IpAddr=${IpAddr_purge_last//\"/} echo "IpAddr of port($PortId): $IpAddr" - - if [[ $1 = configurator ]]; then - configurator_image_name=$1 - configurator_port_id=$PortId - configurator_ip=$IpAddr - else - visibility_image_name=$1 - visibility_port_id=$PortId - visibility_ip=$IpAddr - fi -} - -function configure_vis_ip_addr_in_docker { - echo "Visibility VM IP address is: $visibility_ip" - sed -i "s/VIS_VM_IP_ADDRESS/"$visibility_ip"/" $NFPSERVICE_DIR/gbpservice/nfp/configurator/Dockerfile + configurator_image_name=$1 + configurator_port_id=$PortId + configurator_ip=$IpAddr } +# create_nfp_image() - Create and upload the service images function create_nfp_image { - source $TOP_DIR/openrc neutron service + source $DEVSTACK_DIR/openrc neutron service unset OS_USER_DOMAIN_ID unset OS_PROJECT_DOMAIN_ID - # during diskimage build, the following setting in apache2 is needed for local repo - sudo cp /etc/apache2/sites-available/000-default.conf /etc/apache2/sites-enabled/ - sudo service apache2 restart - if [[ $NFP_DEVSTACK_MODE = base ]]; then - RefConfiguratorQcow2ImageName=reference_configurator_image echo "Building Image: $RefConfiguratorQcow2ImageName" - sudo python $DISK_IMAGE_DIR/diskimage-create/disk_image_create.py $DISK_IMAGE_DIR/diskimage-create/ref_configurator_conf.json - RefConfiguratorQcow2Image=$(cat 
/tmp/image_path) + sudo python $DISKIMAGE_CREATE_DIR/disk_image_create.py $DISKIMAGE_CREATE_DIR/ref_configurator_conf.json + RefConfiguratorQcow2Image=$(cat $DISKIMAGE_CREATE_DIR/output/last_built_image_path) echo "Uploading Image: $RefConfiguratorQcow2ImageName" glance image-create --name $RefConfiguratorQcow2ImageName --disk-format qcow2 --container-format bare --visibility public --file $RefConfiguratorQcow2Image openstack --os-cloud=devstack-admin flavor create --ram 512 --disk 3 --vcpus 1 m1.nfp-tiny - else - - if [[ $NFP_DEVSTACK_MODE = enterprise ]]; then - - ConfiguratorQcow2ImageName=configurator - ConfiguratorInstanceName="configuratorVM_instance" - create_port_for_vm $ConfiguratorQcow2ImageName - - if [[ $ConfiguratorQcow2Image = build ]]; then - echo "Building Image: $ConfiguratorQcow2ImageName" - sudo python $DISK_IMAGE_DIR/diskimage-create/disk_image_create.py $DISK_IMAGE_DIR/diskimage-create/configurator_conf.json $GBPSERVICE_BRANCH - ConfiguratorQcow2Image=$(cat /tmp/image_path) - fi - echo "Uploading Image: $ConfiguratorQcow2ImageName" - glance image-create --name $ConfiguratorQcow2ImageName --disk-format qcow2 --container-format bare --visibility public --file $ConfiguratorQcow2Image - - VisibilityQcow2ImageName=visibility - VisibilityInstanceName="VisibilityVM_instance" - create_port_for_vm $VisibilityQcow2ImageName - - if [[ $VisibilityQcow2Image = build ]]; then - - # edits the docker file to add visibility vm IP address - configure_vis_ip_addr_in_docker - - # prepare visibility source, this is needed for diskimage build - cd /home/stack/ - sudo rm -rf visibility - sudo git clone https://$GIT_ACCESS_USERNAME:$GIT_ACCESS_PASSWORD@github.com/oneconvergence/visibility.git -b $VISIBILITY_GIT_BRANCH - echo "Building Image: $VisibilityQcow2ImageName" - sudo python $DISK_IMAGE_DIR/diskimage-create/visibility_disk_image_create.py $DISK_IMAGE_DIR/diskimage-create/visibility_conf.json $GBPSERVICE_BRANCH $TOP_DIR/local.conf - VisibilityQcow2Image=$(cat 
/tmp/image_path) - fi - echo "Uploading Image: $VisibilityQcow2ImageName" - glance image-create --name $VisibilityQcow2ImageName --disk-format qcow2 --container-format bare --visibility public --file $VisibilityQcow2Image - - AsavQcow2ImageName=asav - echo "Uploading Image: $AsavQcow2ImageName" - glance image-create --name $AsavQcow2ImageName --disk-format qcow2 --container-format bare --visibility public --file $AsavQcow2Image - PaloAltoQcow2ImageName=paloalto - echo "Uploading Image: $PaloAltoQcow2ImageName" - glance image-create --name $PaloAltoQcow2ImageName --disk-format qcow2 --container-format bare --visibility public --file $PaloAltoQcow2Image - - else - - ConfiguratorQcow2ImageName=configurator - ConfiguratorInstanceName="configuratorVM_instance" - create_port_for_vm $ConfiguratorQcow2ImageName - if [[ $ConfiguratorQcow2Image = build ]]; then - echo "Building Image: $ConfiguratorQcow2ImageName" - sudo python $DISK_IMAGE_DIR/diskimage-create/disk_image_create.py $DISK_IMAGE_DIR/diskimage-create/configurator_conf.json $GBPSERVICE_BRANCH - ConfiguratorQcow2Image=$(cat /tmp/image_path) - fi - echo "Uploading Image: $ConfiguratorQcow2ImageName" - glance image-create --name $ConfiguratorQcow2ImageName --disk-format qcow2 --container-format bare --visibility public --file $ConfiguratorQcow2Image - + ConfiguratorQcow2ImageName=configurator + ConfiguratorInstanceName="configuratorVM_instance" + create_port_for_vm $ConfiguratorQcow2ImageName $ConfiguratorInstanceName + if [[ $ConfiguratorQcow2Image = build ]]; then + echo "Building Image: $ConfiguratorQcow2ImageName" + sudo python $DISKIMAGE_CREATE_DIR/disk_image_create.py $DISKIMAGE_CREATE_DIR/configurator_conf.json $GBPSERVICE_BRANCH + ConfiguratorQcow2Image=$(cat $DISKIMAGE_CREATE_DIR/output/last_built_image_path) fi + echo "Uploading Image: $ConfiguratorQcow2ImageName" + glance image-create --name $ConfiguratorQcow2ImageName --disk-format qcow2 --container-format bare --visibility public --file 
$ConfiguratorQcow2Image VyosQcow2ImageName=vyos - if [[ $VyosQcow2Image = build ]]; then - echo "Building Image: $VyosQcow2ImageName" - sudo python $DISK_IMAGE_DIR/diskimage-create/disk_image_create.py $DISK_IMAGE_DIR/diskimage-create/vyos_conf.json - VyosQcow2Image=$(cat /tmp/image_path) + if ! [[ -z $VyosQcow2Image ]]; then + echo "Uploading Image: $VyosQcow2ImageName" + glance image-create --name $VyosQcow2ImageName --disk-format qcow2 --container-format bare --visibility public --file $VyosQcow2Image fi - echo "Uploading Image: $VyosQcow2ImageName" - glance image-create --name $VyosQcow2ImageName --disk-format qcow2 --container-format bare --visibility public --file $VyosQcow2Image HaproxyQcow2ImageName=haproxy - if [[ $HaproxyQcow2Image = build ]]; then - echo "Building Image: $HaproxyQcow2ImageName" - sudo python $DISK_IMAGE_DIR/diskimage-create/disk_image_create.py $DISK_IMAGE_DIR/diskimage-create/haproxy_conf.json - HaproxyQcow2Image=$(cat /tmp/image_path) + if ! [[ -z $HaproxyQcow2Image ]]; then + echo "Uploading Image: $HaproxyQcow2ImageName" + glance image-create --name $HaproxyQcow2ImageName --disk-format qcow2 --container-format bare --visibility public --file $HaproxyQcow2Image fi - echo "Uploading Image: $HaproxyQcow2ImageName" - glance image-create --name $HaproxyQcow2ImageName --disk-format qcow2 --container-format bare --visibility public --file $HaproxyQcow2Image - Haproxy_LBaasV2_Qcow2Image_Name=haproxy_lbaasv2 - echo "Uploading Image: $Haproxy_LBaasV2_Qcow2Image_Name" - glance image-create --name $Haproxy_LBaasV2_Qcow2Image_Name --disk-format qcow2 --container-format bare --visibility public --file $Haproxy_LBaasV2_Qcow2Image - fi - - # restore the apache2 setting that we did above - sudo rm /etc/apache2/sites-enabled/000-default.conf - sudo service apache2 restart } -function init_nfpgbpservice { - # Run GBP db migrations - gbp-db-manage --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE upgrade head - iniset $NEUTRON_CONF DEFAULT 
policy_dirs $NFP_CONF_DIR +# configure_configurator_user_data() - Configure Configurator user data +function configure_configurator_user_data { + CUR_DIR=$PWD + sudo rm -rf /opt/configurator_user_data + sudo cp -r $NFPSERVICE_DIR/devstack/exercises/nfp_service/user-data/configurator_user_data /opt/. + cd /opt + sudo rm -rf my.key my.key.pub + sudo ssh-keygen -t rsa -N "" -f my.key + value=`sudo cat my.key.pub` + sudo echo $value + sudo sed -i "8 i\ -\ $value" configurator_user_data + sudo sed -i '9d' configurator_user_data + cd $CUR_DIR } -function install_nfpgbpservice { - git_clone $GBPSERVICE_REPO $NFPSERVICE_DIR $GBPSERVICE_BRANCH - mv $NFPSERVICE_DIR/test-requirements.txt $NFPSERVICE_DIR/_test-requirements.txt - setup_develop $NFPSERVICE_DIR - mv -f $NEUTRON_CONF_DIR/policy.json $NEUTRON_CONF_DIR/policy.json.original 2>/dev/null; true - cp -f $NFPSERVICE_DIR/etc/policy.json $NEUTRON_CONF_DIR/policy.json - mv $NFPSERVICE_DIR/_test-requirements.txt $NFPSERVICE_DIR/test-requirements.txt -} +# launch_configuratorVM() - Launch the Configurator VM +function launch_configuratorVM { + echo "Collecting ImageId : for $configurator_image_name" + ImageId=`glance image-list | grep $configurator_image_name | awk '{print $2}'` + if [ ! 
-z "$ImageId" -a "$ImageId" != " " ]; then + echo $ImageId + else + echo "No image found with name $configurator_image_name" + exit + fi -function assign_user_role_credential { - source $TOP_DIR/openrc admin admin - serviceTenantID=`keystone tenant-list | grep "service" | awk '{print $2}'` - serviceRoleID=`keystone role-list | grep "service" | awk '{print $2}'` - adminRoleID=`keystone role-list | grep "admin" | awk '{print $2}'` - keystone user-role-add --user nova --tenant $serviceTenantID --role $serviceRoleID - keystone user-role-add --user neutron --tenant $serviceTenantID --role $adminRoleID + configure_configurator_user_data + nova boot\ + --flavor m1.medium\ + --user-data /opt/configurator_user_data\ + --image $ImageId\ + --nic port-id=$configurator_port_id\ + $ConfiguratorInstanceName + sleep 10 } +# namespace_delete() - Utility for namespace management function namespace_delete { - source $1/openrc neutron service + source $DEVSTACK_DIR/openrc neutron service #Deletion namespace NFP_P=`sudo ip netns | grep "nfp-proxy"` @@ -199,12 +336,12 @@ function namespace_delete { echo "ovs port ptr1 is removed" fi - echo "nfp-proxy cleaning success.... " + echo "nfp-proxy cleaning success." } +# namespace_create() - Utility for namespace management function namespace_create { SERVICE_MGMT_NET="l2p_svc_management_ptg" - cidr="/24" echo "Creating new namespace nfp-proxy...." 
#new namespace with name proxy @@ -256,7 +393,7 @@ function namespace_create { #get port id from router nampace port=`sudo ip netns exec $nm_space ip a | grep "tap" | tail -n 1 | awk '{print $7}'` - #get tag_id form port in ovs-bridge + #get tag_id form port in ovs-bridge tag_id=`sudo ovs-vsctl list port $port | grep "tag" | tail -n 1 | awk '{print $3}'` sudo ovs-vsctl set port pt1 tag=$tag_id @@ -266,253 +403,69 @@ function namespace_create { sudo ip netns exec nfp-proxy ip link set lo up sudo ip link set pt1 up - PING=`sudo ip netns exec nfp-proxy ping $2 -q -c 2 > /dev/null` + PING=`sudo ip netns exec nfp-proxy ping $configurator_ip -q -c 2 > /dev/null` if [ ${#PING} -eq 0 ]; then - echo "nfp-proxy namespcace creation success and reaching to $2" + echo "nfp-proxy namespcace creation success and reaching to $configurator_ip" else - echo "Fails reaching to $2" + echo "Fails reaching to $configurator_ip" fi sudo ip netns exec nfp-proxy /usr/bin/nfp_proxy --config-file=/etc/nfp_proxy.ini } -function create_ext_net { - source $TOP_DIR/stackrc - EXT_NET_NAME=ext-net - EXT_NET_SUBNET_NAME=ext-net-subnet - EXT_NET_GATEWAY=$EXT_NET_GATEWAY - EXT_NET_ALLOCATION_POOL_START=$EXT_NET_ALLOCATION_POOL_START - EXT_NET_ALLOCATION_POOL_END=$EXT_NET_ALLOCATION_POOL_END - EXT_NET_CIDR=$EXT_NET_CIDR - - source $TOP_DIR/openrc neutron service - unset OS_USER_DOMAIN_ID - unset OS_PROJECT_DOMAIN_ID - neutron net-create --router:external=true --shared $EXT_NET_NAME - neutron subnet-create --ip_version 4 --gateway $EXT_NET_GATEWAY --name $EXT_NET_SUBNET_NAME --allocation-pool start=$EXT_NET_ALLOCATION_POOL_START,end=$EXT_NET_ALLOCATION_POOL_END $EXT_NET_NAME $EXT_NET_CIDR -} - -function create_ep_and_nsp { - subnet_id=`neutron net-list | grep "$EXT_NET_NAME" | awk '{print $6}'` - - gbp external-segment-create --ip-version 4 --cidr $EXT_NET_CIDR --external-route destination=0.0.0.0/0,nexthop= --shared True --subnet_id=$subnet_id default - gbp nat-pool-create --ip-version 4 --ip-pool 
$EXT_NET_CIDR --external-segment default --shared True default - gbp ep-create --external-segments default ext_connect - gbp nsp-create --network-service-params type=ip_pool,name=vip_ip,value=nat_pool svc_mgmt_fip_policy -} - -function create_advance_sharing_ptg { - gbp l3policy-create --ip-version 4 --ip-pool 121.0.0.0/20 --proxy-ip-pool=192.167.0.0/24 --subnet-prefix-length 20 advanced_services_sharing_l3p - gbp l2policy-create --l3-policy advanced_services_sharing_l3p advance_sharing_l2p - gbp group-create --l2-policy advance_sharing_l2p Advance_Sharing_PTG -} - -function create_nfp_gbp_resources { - source $TOP_DIR/openrc neutron service - - if [[ $NFP_DEVSTACK_MODE = base ]]; then - - IMAGE_NAME="reference_configurator_image" - FLAVOR=m1.nfp-tiny - gbp service-profile-create --servicetype LOADBALANCER --insertion-mode l3 --shared True --service-flavor service_vendor=haproxy,device_type=None --vendor NFP base_mode_lb - gbp service-profile-create --servicetype FIREWALL --insertion-mode l3 --shared True --service-flavor service_vendor=nfp,device_type=nova,image_name=$IMAGE_NAME,flavor=$FLAVOR --vendor NFP base_mode_fw_vm - - else - - gbp service-profile-create --servicetype LOADBALANCER --insertion-mode l3 --shared True --service-flavor service_vendor=haproxy,device_type=nova --vendor NFP lb_profile - gbp service-profile-create --servicetype LOADBALANCERV2 --insertion-mode l3 --shared True --service-flavor service_vendor=haproxy_lbaasv2,device_type=nova,flavor=m1.small --vendor NFP lbv2_profile - gbp service-profile-create --servicetype FIREWALL --insertion-mode l3 --shared True --service-flavor service_vendor=vyos,device_type=nova --vendor NFP vyos_fw_profile - gbp service-profile-create --servicetype VPN --insertion-mode l3 --shared True --service-flavor service_vendor=vyos,device_type=nova --vendor NFP vpn_profile - - if [[ $NFP_DEVSTACK_MODE = enterprise ]]; then - gbp service-profile-create --servicetype FIREWALL --insertion-mode l3 --shared True 
--service-flavor service_vendor=asav,device_type=nova --vendor NFP asav_fw_profile - fi - create_ext_net - create_ep_and_nsp - create_advance_sharing_ptg - - fi - - gbp l3policy-create --ip-version 4 --proxy-ip-pool=192.169.0.0/24 --ip-pool 120.0.0.0/24 --subnet-prefix-length 24 service_management - gbp l2policy-create --l3-policy service_management svc_management_ptg - - gbp group-create svc_management_ptg --service_management True --l2-policy svc_management_ptg - neutron router-gateway-clear l3p_service_management -} - -function configure_configurator_user_data { - CUR_DIR=$PWD - sudo rm -rf /opt/configurator_user_data - sudo cp -r $DEST/gbp/devstack/exercises/nfp_service/user-data/configurator_user_data /opt/. - cd /opt - sudo rm -rf my.key my.key.pub - sudo ssh-keygen -t rsa -N "" -f my.key - value=`sudo cat my.key.pub` - sudo echo $value - sudo sed -i "8 i\ -\ $value" configurator_user_data - sudo sed -i '9d' configurator_user_data - cd $CUR_DIR -} - -function launch_configuratorVM { - echo "Collecting ImageId : for $configurator_image_name" - ImageId=`glance image-list | grep $configurator_image_name | awk '{print $2}'` - if [ ! -z "$ImageId" -a "$ImageId" != " " ]; then - echo $ImageId - else - echo "No image found with name $configurator_image_name ..." 
- exit - fi - - configure_configurator_user_data - nova boot --flavor m1.medium --user-data /opt/configurator_user_data --image $ImageId --nic port-id=$configurator_port_id $ConfiguratorInstanceName - sleep 10 -} - +# copy_nfp_files_and_start_process() - Setup configuration and start processes function copy_nfp_files_and_start_process { - cd /opt/stack/gbp/gbpservice/nfp - sudo cp -r bin/nfp /usr/bin/ + cd $NFPSERVICE_DIR/gbpservice/nfp + sudo cp -r bin/nfp /usr/bin/ sudo chmod +x /usr/bin/nfp sudo rm -rf /etc/nfp_* - sudo cp -r bin/nfp_orchestrator.ini /etc/ - sudo cp -r bin/nfp_proxy_agent.ini /etc/ - [[ $NFP_DEVSTACK_MODE != base ]] && sudo cp -r bin/nfp_config_orch.ini /etc/ - sudo cp -r bin/nfp_proxy.ini /etc/nfp_proxy.ini - sudo cp -r bin/nfp_proxy /usr/bin/ + sudo cp -r bin/nfp_orchestrator.ini /etc/ + sudo cp -r bin/nfp_proxy_agent.ini /etc/ + [[ $NFP_DEVSTACK_MODE = advanced ]] && sudo cp -r ../contrib/nfp/bin/nfp_config_orch.ini /etc/ + sudo cp -r bin/nfp_proxy.ini /etc/nfp_proxy.ini + sudo cp -r bin/nfp_proxy /usr/bin/ if [[ $NFP_DEVSTACK_MODE = base ]]; then - IpAddr=127.0.0.1 - CONFIGURATOR_PORT=8080 + configurator_ip=127.0.0.1 + configurator_port=8080 else - CONFIGURATOR_PORT=8070 - IpAddr=$configurator_ip + configurator_ip=$configurator_ip + configurator_port=8070 fi + echo "Configuring proxy.ini .... with rest_server_address as $configurator_ip:$configurator_port" + sudo sed -i "s/rest_server_address=*.*/rest_server_address=$configurator_ip/g" /etc/nfp_proxy.ini + sudo sed -i "s/rest_server_port= *.*/rest_server_port=$configurator_port/g" /etc/nfp_proxy.ini - echo "Configuring proxy.ini .... 
with rest_server_address as $IpAddr" - sudo sed -i "s/rest_server_address=*.*/rest_server_address=$IpAddr/g" /etc/nfp_proxy.ini - sudo sed -i "s/rest_server_port= *.*/rest_server_port=$CONFIGURATOR_PORT/g" /etc/nfp_proxy.ini + sed -i 's#source.*#source '$DEVSTACK_DIR'/openrc demo demo#g' $NFPSERVICE_DIR/devstack/exercises/nfp_service/*.sh + source $DEVSTACK_DIR/functions-common - sed -i 's#source.*#source '$TOP_DIR'/openrc demo demo#g' /opt/stack/gbp/devstack/exercises/nfp_service/*.sh - source $TOP_DIR/functions-common - - echo "Starting orchestrator >>>> under screen named : nfp_orchestrator" - run_process nfp_orchestrator "sudo /usr/bin/nfp --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini --config-file /etc/nfp_orchestrator.ini --log-file /opt/stack/logs/nfp_orchestrator.log" + echo "Starting nfp_orchestrator under screen named nfp_orchestrator" + run_process nfp_orchestrator "sudo /usr/bin/nfp --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini --config-file /etc/nfp_orchestrator.ini --log-file $DEST/logs/nfp_orchestrator.log" sleep 4 - echo "Starting proxy_agent >>>> under screen named : nfp_proxy_agent" - run_process nfp_proxy_agent "sudo /usr/bin/nfp --config-file /etc/nfp_proxy_agent.ini --log-file /opt/stack/logs/nfp_proxy_agent.log" + echo "Starting nfp_proxy_agent under screen named nfp_proxy_agent" + run_process nfp_proxy_agent "sudo /usr/bin/nfp --config-file /etc/nfp_proxy_agent.ini --log-file $DEST/logs/nfp_proxy_agent.log" sleep 4 - echo "Starting proxy server under Namespace : nfp-proxy namespace >>>> under screen named : nfp_proxy" - run_process nfp_proxy "source $NFPSERVICE_DIR/devstack/lib/nfp; namespace_delete $TOP_DIR; namespace_create $TOP_DIR $IpAddr" + echo "Starting nfp_proxy inside namespace named nfp-proxy, under screen named nfp_proxy" + run_process nfp_proxy "source $NFPSERVICE_DIR/devstack/lib/nfp; namespace_delete; namespace_create" sleep 10 - if [[ 
$NFP_DEVSTACK_MODE != base ]]; then - echo "Starting nfp config orchestrator >>>> under screen named : nfp_config_orchestrator" - run_process nfp_config_orchestrator "sudo /usr/bin/nfp --config-file /etc/nfp_config_orch.ini --config-file /etc/neutron/neutron.conf --log-file /opt/stack/logs/nfp_config_orchestrator.log" + if [[ $NFP_DEVSTACK_MODE = advanced ]]; then + echo "Starting nfp_config_orchestrator under screen named nfp_config_orchestrator" + run_process nfp_config_orchestrator "sudo /usr/bin/nfp --config-file /etc/nfp_config_orch.ini --config-file /etc/neutron/neutron.conf --log-file $DEST/logs/nfp_config_orchestrator.log" else cd pecan/api sudo python setup.py develop - echo "Starting nfp_base_configurator >>>> under screen named : nfp_base_configurator" - run_process nfp_base_configurator "cd /opt/stack/gbp/gbpservice/nfp/pecan/api; sudo ip netns exec nfp-proxy pecan configurator_decider config.py --mode base" + echo "Starting nfp_base_configurator under screen named nfp_base_configurator" + run_process nfp_base_configurator "cd $NFPSERVICE_DIR/gbpservice/nfp/pecan/api; sudo ip netns exec nfp-proxy pecan configurator_decider config.py --mode base" fi sleep 1 - echo "Running gbp-db-manage" - source $TOP_DIR/openrc neutron service + echo "Upgrading DB to HEAD" + source $DEVSTACK_DIR/openrc neutron service gbp-db-manage --config-file /etc/neutron/neutron.conf upgrade head sleep 2 - echo "NFP configuration done...!! 
" -} - -function nfp_logs_forword { - VISIBILITY_CONF="/etc/rsyslog.d/visibility.conf" - SYSLOG_CONFIG="/etc/rsyslog.conf" - log_facility=local1 - - sudo sed -i '/#$ModLoad imudp/ s/^#//' $SYSLOG_CONFIG - sudo sed -i '/#$UDPServerRun 514/ s/^#//' $SYSLOG_CONFIG - echo "Successfully enabled UDP in syslog" - - visibility_vm_ip_address=$(neutron floatingip-list --format value | grep "$IpAddr2" | awk '{print $3}') - echo "$log_facility.* @$visibility_vm_ip_address:514" | sudo tee $VISIBILITY_CONF - echo "Created $VISIBILITY_CONF file" - - sudo service rsyslog restart - if [ $? -ne 0 ]; then - echo "ERROR: Failed to restart rsyslog" - fi -} - -function configure_visibility_user_data { - CUR_DIR=$PWD - visibility_vm_ip=$1 - - sudo rm -rf /opt/visibility_user_data - sudo cp -r $DEST/gbp/devstack/exercises/nfp_service/user-data/visibility_user_data /opt/. - cd /opt - - sudo rm -rf my.key my.key.pub - sudo ssh-keygen -t rsa -N "" -f my.key - value=`sudo cat my.key.pub` - sudo echo $value - sudo sed -i "s||${value}|" visibility_user_data - - sudo sed -i "s/visibility_vm_ip=*.*/visibility_vm_ip=$visibility_vm_ip/g" visibility_user_data - sudo sed -i "s/os_controller_ip=*.*/os_controller_ip=$HOST_IP/g" visibility_user_data - sudo sed -i "s/statsd_host=*.*/statsd_host=$visibility_vm_ip/g" visibility_user_data - sudo sed -i "s/rabbit_host=*.*/rabbit_host=$configurator_ip/g" visibility_user_data - - cd $CUR_DIR -} - -function attach_security_groups { - unset OS_USER_DOMAIN_ID - unset OS_PROJECT_DOMAIN_ID - - SecGroup="allow_all" - nova secgroup-create $SecGroup "allow all traffic" - nova secgroup-add-rule $SecGroup udp 1 65535 120.0.0.0/24 - nova secgroup-add-rule $SecGroup icmp -1 -1 120.0.0.0/24 - nova secgroup-add-rule $SecGroup tcp 1 65535 120.0.0.0/24 - nova secgroup-add-rule $SecGroup tcp 80 80 0.0.0.0/0 - nova secgroup-add-rule $SecGroup udp 514 514 0.0.0.0/0 - nova secgroup-add-rule $SecGroup tcp 443 443 0.0.0.0/0 -} - -function launch_visibilityVM { - neutron net-create 
visibility-network - neutron subnet-create visibility-network 188.0.0.0/24 --name visibility-subnet - neutron router-create visibility-router - neutron router-gateway-set visibility-router $EXT_NET_NAME - neutron router-interface-add visibility-router visibility-subnet - ExtPortId=$(neutron port-create visibility-network | grep ' id ' | awk '{print $4}') - - fip_id=$(neutron floatingip-create $EXT_NET_NAME | grep ' id '| awk '{print $4}') - neutron floatingip-associate $fip_id $ExtPortId - - IpAddr_extractor=`neutron port-list --format value|grep $ExtPortId|awk '{print $6}'` - IpAddr_purge_last=${IpAddr_extractor::-1} - IpAddr2=${IpAddr_purge_last//\"/} - echo "Collecting IpAddr : for $ExtPortId" - echo $IpAddr2 - - configure_visibility_user_data $visibility_ip - - echo "Collecting ImageId : for $visibility_image_name" - ImageId=`glance image-list|grep $visibility_image_name |awk '{print $2}'` - if [ ! -z "$ImageId" -a "$ImageId" != " " ]; then - echo $ImageId - else - echo "No image found with name $visibility_image_name ..." - exit - fi - - attach_security_groups - echo "Launching Visibility image" - nova boot --image $ImageId --flavor m1.xlarge --user-data /opt/visibility_user_data --nic port-id=$visibility_port_id --nic port-id=$ExtPortId $VisibilityInstanceName - - sleep 10 - nova add-secgroup $VisibilityInstanceName $SecGroup + echo "NFP configuration done." 
} diff --git a/devstack/local.conf.nfp b/devstack/local.conf.nfp index 2287a03c16..8b75948ff9 100644 --- a/devstack/local.conf.nfp +++ b/devstack/local.conf.nfp @@ -16,52 +16,42 @@ SERVICE_TOKEN=admin # NFP Service ENABLE_NFP=True -# Edit NFP_DEVSTACK_MODE with either 'base' or 'advanced' or 'enterprise' -[[ $ENABLE_NFP = True ]] && NFP_DEVSTACK_MODE=base -[[ $NFP_DEVSTACK_MODE != base ]] && enable_plugin neutron-vpnaas https://git.openstack.org/openstack/neutron-vpnaas stable/mitaka -[[ $NFP_DEVSTACK_MODE != base ]] && enable_plugin ceilometer https://git.openstack.org/openstack/ceilometer.git stable/mitaka - -if [[ $NFP_DEVSTACK_MODE = base ]]; then - GBPSERVICE_REPO=https://git.openstack.org/openstack/group-based-policy - GBPSERVICE_BRANCH=master -elif [[ $NFP_DEVSTACK_MODE = advanced ]]; then - # Openstack repo - #GBPSERVICE_REPO=https://git.openstack.org/openstack/group-based-policy - #GBPSERVICE_BRANCH=refs/changes/05/335405/27 - # Oneconvergence repo - GBPSERVICE_REPO=https://github.com/oneconvergence/group-based-policy.git - GBPSERVICE_BRANCH=mitaka_21st_march_base -else - GBPSERVICE_REPO=https://github.com/oneconvergence/group-based-policy.git - GBPSERVICE_BRANCH=mitaka_21st_march_base -fi -enable_plugin gbp $GBPSERVICE_REPO $GBPSERVICE_BRANCH - -if [[ $NFP_DEVSTACK_MODE != base ]]; then - # External Network - EXT_NET_GATEWAY= - EXT_NET_ALLOCATION_POOL_START= - EXT_NET_ALLOCATION_POOL_END= - EXT_NET_CIDR= - - VyosQcow2Image= - Haproxy_LBaasV2_Qcow2Image= - - # Make sure that your public interface is not attached to any bridge. 
- PUBLIC_INTERFACE= -fi - -if [[ $NFP_DEVSTACK_MODE = enterprise ]]; then - # Visibility GIT path - # Default visibility repo https://github.com/oneconvergence/visibility.git - VISIBILITY_GIT_BRANCH=master - GIT_ACCESS_USERNAME= - GIT_ACCESS_PASSWORD= - - # Configure docker images local repository - DOCKER_IMAGES_URL=http://192.168.100.50/docker_images/ +GBPSERVICE_REPO=https://git.openstack.org/openstack/group-based-policy +GBPSERVICE_BRANCH=master +# Edit NFP_DEVSTACK_MODE with either 'base' or 'advanced' +if [[ $ENABLE_NFP = True ]]; then + NFP_DEVSTACK_MODE=base + + if [[ $NFP_DEVSTACK_MODE = advanced ]]; then + # External Network + EXT_NET_GATEWAY= + EXT_NET_ALLOCATION_POOL_START= + EXT_NET_ALLOCATION_POOL_END= + EXT_NET_CIDR= - + # Configurator image path option, it's optional + # If configured, install step uploads the specified image + # If not configured, install step will build a new one and upload it + ConfiguratorQcow2Image= + # Service VM image path options, they are optional + # If configured, install step uploads the specified images + # If not configured, install step ignores uploading these service images + VyosQcow2Image= + HaproxyQcow2Image= + + # Make sure that your public interface is not attached to any bridge. 
+ PUBLIC_INTERFACE= + + enable_plugin neutron-vpnaas https://git.openstack.org/openstack/neutron-vpnaas stable/mitaka + enable_plugin ceilometer https://git.openstack.org/openstack/ceilometer.git stable/mitaka + + # Openstack repo + #GBPSERVICE_REPO=https://git.openstack.org/openstack/group-based-policy + #GBPSERVICE_BRANCH=refs/changes/05/335405/27 + # Oneconvergence repo + GBPSERVICE_REPO=https://github.com/oneconvergence/group-based-policy.git + GBPSERVICE_BRANCH=mitaka_21st_march_base + fi fi +enable_plugin gbp $GBPSERVICE_REPO $GBPSERVICE_BRANCH diff --git a/devstack/plugin.sh b/devstack/plugin.sh index 818b9a74fa..62dba15285 100755 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -5,10 +5,6 @@ function gbp_configure_nova { iniset $NOVA_CONF neutron allow_duplicate_networks "True" } -function nfp_configure_nova { - iniset $NOVA_CONF DEFAULT instance_usage_audit "True" -} - function gbp_configure_heat { local HEAT_PLUGINS_DIR="/opt/stack/gbpautomation/gbpautomation/heat" iniset $HEAT_CONF DEFAULT plugin_dirs "$HEAT_PLUGINS_DIR" @@ -55,7 +51,7 @@ function configure_nfp_loadbalancer { 's'/\ ':default'/\ '\n'\ -'service_provider = LOADBALANCER:loadbalancer:gbpservice.nfp.service_plugins.loadbalancer.drivers.nfp_lbaas_plugin_driver.HaproxyOnVMPluginDriver:default'/\ +'service_provider = LOADBALANCER:loadbalancer:gbpservice.contrib.nfp.service_plugins.loadbalancer.drivers.nfp_lbaas_plugin_driver.HaproxyOnVMPluginDriver:default'/\ /etc/neutron/neutron_lbaas.conf } @@ -67,7 +63,7 @@ function configure_nfp_firewall { '/^service_plugins/'\ 's'/\ 'neutron_fwaas.services.firewall.fwaas_plugin.FirewallPlugin'/\ -'gbpservice.nfp.service_plugins.firewall.nfp_fwaas_plugin.NFPFirewallPlugin'/\ +'gbpservice.contrib.nfp.service_plugins.firewall.nfp_fwaas_plugin.NFPFirewallPlugin'/\ /etc/neutron/neutron.conf } @@ -80,7 +76,7 @@ function configure_nfp_vpn { 's'/\ ':default'/\ '\n'\ -'service_provider = 
VPN:vpn:gbpservice.nfp.service_plugins.vpn.drivers.nfp_vpnaas_driver.NFPIPsecVPNDriver:default'/\ +'service_provider = VPN:vpn:gbpservice.contrib.nfp.service_plugins.vpn.drivers.nfp_vpnaas_driver.NFPIPsecVPNDriver:default'/\ /etc/neutron/neutron_vpnaas.conf } @@ -102,16 +98,12 @@ if is_service_enabled group-policy; then if [[ $ENABLE_NFP = True ]]; then echo_summary "Configuring $NFP" nfp_configure_neutron - [[ $NFP_DEVSTACK_MODE = enterprise ]] && nfp_configure_nova - if [[ $NFP_DEVSTACK_MODE != base ]]; then + if [[ $NFP_DEVSTACK_MODE = advanced ]]; then configure_nfp_loadbalancer configure_nfp_firewall configure_nfp_vpn fi fi -# install_apic_ml2 -# install_aim -# init_aim install_gbpclient install_gbpservice [[ $ENABLE_NFP = True ]] && install_nfpgbpservice @@ -129,7 +121,6 @@ if is_service_enabled group-policy; then create_nfp_gbp_resources create_nfp_image [[ $NFP_DEVSTACK_MODE = advanced ]] && launch_configuratorVM - [[ $NFP_DEVSTACK_MODE = enterprise ]] && launch_configuratorVM && launch_visibilityVM && nfp_logs_forword copy_nfp_files_and_start_process fi fi diff --git a/devstack/settings b/devstack/settings index 478e42d069..54bd1d39d0 100755 --- a/devstack/settings +++ b/devstack/settings @@ -5,9 +5,6 @@ ENABLE_NFP=${ENABLE_NFP:-False} [[ $ENABLE_NFP = True ]] && source $DEST/gbp/devstack/lib/nfp # VM locations ConfiguratorQcow2Image=${ConfiguratorQcow2Image:-build} -VisibilityQcow2Image=${VisibilityQcow2Image:-build} -VyosQcow2Image=${VyosQcow2Image:-build} -HaproxyQcow2Image=${HaproxyQcow2Image:-build} # Enable necessary Neutron plugins, including group_policy and ncp Q_SERVICE_PLUGIN_CLASSES=neutron.services.l3_router.l3_router_plugin.L3RouterPlugin,group_policy,ncp @@ -35,7 +32,7 @@ enable_service q-agt enable_service q-dhcp enable_service q-l3 enable_service q-fwaas -[[ $NFP_DEVSTACK_MODE != base ]] && enable_service neutron-vpnaas +[[ $ENABLE_NFP = True ]] && [[ $NFP_DEVSTACK_MODE = advanced ]] && enable_service neutron-vpnaas enable_service q-lbaas 
enable_service q-meta enable_service neutron diff --git a/gbpservice/neutron/tests/unit/nfp/base_configurator/api/__init__.py b/gbpservice/contrib/__init__.py similarity index 100% rename from gbpservice/neutron/tests/unit/nfp/base_configurator/api/__init__.py rename to gbpservice/contrib/__init__.py diff --git a/gbpservice/neutron/tests/unit/nfp/base_configurator/api/v1/__init__.py b/gbpservice/contrib/nfp/__init__.py similarity index 100% rename from gbpservice/neutron/tests/unit/nfp/base_configurator/api/v1/__init__.py rename to gbpservice/contrib/nfp/__init__.py diff --git a/gbpservice/nfp/bin/nfp_config_orch.ini b/gbpservice/contrib/nfp/bin/nfp_config_orch.ini similarity index 68% rename from gbpservice/nfp/bin/nfp_config_orch.ini rename to gbpservice/contrib/nfp/bin/nfp_config_orch.ini index d2cb106154..abd3fbeebe 100644 --- a/gbpservice/nfp/bin/nfp_config_orch.ini +++ b/gbpservice/contrib/nfp/bin/nfp_config_orch.ini @@ -4,7 +4,7 @@ debug=False kombu_reconnect_delay=1.0 rabbit_use_ssl=False rabbit_virtual_host=/ -nfp_modules_path=gbpservice.nfp.config_orchestrator.modules +nfp_modules_path=gbpservice.contrib.nfp.config_orchestrator.modules backend=rpc [RPC] diff --git a/gbpservice/contrib/nfp/bin/nfp_configurator.ini b/gbpservice/contrib/nfp/bin/nfp_configurator.ini new file mode 100644 index 0000000000..a11a79d513 --- /dev/null +++ b/gbpservice/contrib/nfp/bin/nfp_configurator.ini @@ -0,0 +1,23 @@ +[DEFAULT] +policy_file=/etc/policy.json +debug=False +rabbit_password=guest +rabbit_userid=guest +rabbit_hosts=127.0.0.1 +rabbit_port=5672 +rabbit_host= +kombu_reconnect_delay=1.0 +control_exchange = openstack + +rabbit_use_ssl=False + +rabbit_virtual_host=/ +workers=2 +nfp_modules_path=gbpservice.contrib.nfp.configurator.modules +reportstate_interval=10 +periodic_interval=2 + +log_forward_ip_address= +log_forward_port=514 +log_level=debug + diff --git a/gbpservice/nfp/bin/policy.json b/gbpservice/contrib/nfp/bin/policy.json similarity index 100% rename from 
gbpservice/nfp/bin/policy.json rename to gbpservice/contrib/nfp/bin/policy.json diff --git a/gbpservice/neutron/tests/unit/nfp/base_configurator/api/v1/controllers/__init__.py b/gbpservice/contrib/nfp/config_orchestrator/__init__.py similarity index 100% rename from gbpservice/neutron/tests/unit/nfp/base_configurator/api/v1/controllers/__init__.py rename to gbpservice/contrib/nfp/config_orchestrator/__init__.py diff --git a/gbpservice/neutron/tests/unit/nfp/config_orchestrator/__init__.py b/gbpservice/contrib/nfp/config_orchestrator/common/__init__.py similarity index 100% rename from gbpservice/neutron/tests/unit/nfp/config_orchestrator/__init__.py rename to gbpservice/contrib/nfp/config_orchestrator/common/__init__.py diff --git a/gbpservice/nfp/config_orchestrator/common/common.py b/gbpservice/contrib/nfp/config_orchestrator/common/common.py similarity index 98% rename from gbpservice/nfp/config_orchestrator/common/common.py rename to gbpservice/contrib/nfp/config_orchestrator/common/common.py index 982e90b487..64b1a13145 100644 --- a/gbpservice/nfp/config_orchestrator/common/common.py +++ b/gbpservice/contrib/nfp/config_orchestrator/common/common.py @@ -10,14 +10,15 @@ # License for the specific language governing permissions and limitations # under the License. 
-from gbpservice.nfp.config_orchestrator.common import topics as a_topics +from gbpservice.contrib.nfp.config_orchestrator.common import ( + topics as a_topics) +from gbpservice.nfp.core import log as nfp_logging from gbpservice.nfp.lib import transport from neutron.common import constants as n_constants from neutron.common import rpc as n_rpc from neutron.common import topics as n_topics -from gbpservice.nfp.core import log as nfp_logging import oslo_messaging as messaging LOG = nfp_logging.getLogger(__name__) diff --git a/gbpservice/nfp/config_orchestrator/common/lbv2_constants.py b/gbpservice/contrib/nfp/config_orchestrator/common/lbv2_constants.py similarity index 100% rename from gbpservice/nfp/config_orchestrator/common/lbv2_constants.py rename to gbpservice/contrib/nfp/config_orchestrator/common/lbv2_constants.py diff --git a/gbpservice/nfp/config_orchestrator/common/topics.py b/gbpservice/contrib/nfp/config_orchestrator/common/topics.py similarity index 100% rename from gbpservice/nfp/config_orchestrator/common/topics.py rename to gbpservice/contrib/nfp/config_orchestrator/common/topics.py diff --git a/gbpservice/neutron/tests/unit/nfp/config_orchestrator/modules/__init__.py b/gbpservice/contrib/nfp/config_orchestrator/handlers/__init__.py similarity index 100% rename from gbpservice/neutron/tests/unit/nfp/config_orchestrator/modules/__init__.py rename to gbpservice/contrib/nfp/config_orchestrator/handlers/__init__.py diff --git a/gbpservice/neutron/tests/unit/nfp/configurator/__init__.py b/gbpservice/contrib/nfp/config_orchestrator/handlers/config/__init__.py similarity index 100% rename from gbpservice/neutron/tests/unit/nfp/configurator/__init__.py rename to gbpservice/contrib/nfp/config_orchestrator/handlers/config/__init__.py diff --git a/gbpservice/nfp/config_orchestrator/handlers/config/firewall.py b/gbpservice/contrib/nfp/config_orchestrator/handlers/config/firewall.py similarity index 98% rename from 
gbpservice/nfp/config_orchestrator/handlers/config/firewall.py rename to gbpservice/contrib/nfp/config_orchestrator/handlers/config/firewall.py index ebaeed6bd0..5a7c6ec824 100644 --- a/gbpservice/nfp/config_orchestrator/handlers/config/firewall.py +++ b/gbpservice/contrib/nfp/config_orchestrator/handlers/config/firewall.py @@ -13,7 +13,7 @@ import ast import copy -from gbpservice.nfp.config_orchestrator.common import common +from gbpservice.contrib.nfp.config_orchestrator.common import common from gbpservice.nfp.core import log as nfp_logging from gbpservice.nfp.lib import transport diff --git a/gbpservice/nfp/config_orchestrator/handlers/config/loadbalancer.py b/gbpservice/contrib/nfp/config_orchestrator/handlers/config/loadbalancer.py similarity index 99% rename from gbpservice/nfp/config_orchestrator/handlers/config/loadbalancer.py rename to gbpservice/contrib/nfp/config_orchestrator/handlers/config/loadbalancer.py index 7d62b37981..d7e3c04a2f 100644 --- a/gbpservice/nfp/config_orchestrator/handlers/config/loadbalancer.py +++ b/gbpservice/contrib/nfp/config_orchestrator/handlers/config/loadbalancer.py @@ -13,8 +13,8 @@ import ast import copy +from gbpservice.contrib.nfp.config_orchestrator.common import common from gbpservice.nfp.common import constants as const -from gbpservice.nfp.config_orchestrator.common import common from gbpservice.nfp.core import log as nfp_logging from gbpservice.nfp.lib import transport diff --git a/gbpservice/nfp/config_orchestrator/handlers/config/loadbalancerv2.py b/gbpservice/contrib/nfp/config_orchestrator/handlers/config/loadbalancerv2.py similarity index 97% rename from gbpservice/nfp/config_orchestrator/handlers/config/loadbalancerv2.py rename to gbpservice/contrib/nfp/config_orchestrator/handlers/config/loadbalancerv2.py index 828b0491f9..a967ef896d 100644 --- a/gbpservice/nfp/config_orchestrator/handlers/config/loadbalancerv2.py +++ b/gbpservice/contrib/nfp/config_orchestrator/handlers/config/loadbalancerv2.py @@ -13,8 +13,8 
@@ import ast import copy +from gbpservice.contrib.nfp.config_orchestrator.common import common from gbpservice.nfp.common import constants as const -from gbpservice.nfp.config_orchestrator.common import common from gbpservice.nfp.core import log as nfp_logging from gbpservice.nfp.lib import transport @@ -87,7 +87,7 @@ def _context(self, **kwargs): if context.is_admin: kwargs['tenant_id'] = context.tenant_id core_db = self._get_core_context(context, kwargs['tenant_id']) - # TODO(jiahao): _get_lb_context() fails for flavor_id, disable it + # REVISIT(jiahao): _get_lb_context() fails for flavor_id, disable it # for now. Sent the whole core_db to cofigurator # lb_db = self._get_lb_context(**kwargs) # db = self._filter_service_info_with_resource(lb_db, core_db) @@ -139,8 +139,7 @@ def _data_wrapper(self, context, tenant_id, name, reason, nf, **kwargs): 'context': context, 'description': str(description)} - ctx_dict, rsrc_ctx_dict = self.\ - _prepare_resource_context_dicts(**args) + ctx_dict, rsrc_ctx_dict = self._prepare_resource_context_dicts(**args) nfp_context.update({'neutron_context': ctx_dict, 'requester': 'nas_service', @@ -172,7 +171,7 @@ def _fetch_nf_from_resource_desc(self, desc): nf_id = desc_dict['network_function_id'] return nf_id - #TODO(jiahao): Argument allocate_vip and + # REVISIT(jiahao): Argument allocate_vip and # delete_vip_port are not implememnted. 
@log_helpers.log_method_call def create_loadbalancer(self, context, loadbalancer, driver_name, @@ -295,7 +294,7 @@ def delete_healthmonitor(self, context, healthmonitor): 'healthmonitor', nf, healthmonitor=healthmonitor) nfp_logging.clear_logging_context() - # TODO(jiahao): L7policy support not implemented + # REVISIT(jiahao): L7policy support not implemented # disable L7policy # def create_l7policy(self, context, l7policy): # self._post( diff --git a/gbpservice/nfp/config_orchestrator/handlers/config/vpn.py b/gbpservice/contrib/nfp/config_orchestrator/handlers/config/vpn.py similarity index 99% rename from gbpservice/nfp/config_orchestrator/handlers/config/vpn.py rename to gbpservice/contrib/nfp/config_orchestrator/handlers/config/vpn.py index b7f430d38d..6daf8daaf9 100644 --- a/gbpservice/nfp/config_orchestrator/handlers/config/vpn.py +++ b/gbpservice/contrib/nfp/config_orchestrator/handlers/config/vpn.py @@ -12,7 +12,7 @@ import ast import copy -from gbpservice.nfp.config_orchestrator.common import common +from gbpservice.contrib.nfp.config_orchestrator.common import common from gbpservice.nfp.core import log as nfp_logging from gbpservice.nfp.lib import transport diff --git a/gbpservice/neutron/tests/unit/nfp/configurator/agents/__init__.py b/gbpservice/contrib/nfp/config_orchestrator/handlers/notification/__init__.py similarity index 100% rename from gbpservice/neutron/tests/unit/nfp/configurator/agents/__init__.py rename to gbpservice/contrib/nfp/config_orchestrator/handlers/notification/__init__.py diff --git a/gbpservice/nfp/config_orchestrator/handlers/notification/handler.py b/gbpservice/contrib/nfp/config_orchestrator/handlers/notification/handler.py similarity index 94% rename from gbpservice/nfp/config_orchestrator/handlers/notification/handler.py rename to gbpservice/contrib/nfp/config_orchestrator/handlers/notification/handler.py index da5adf448a..a3f9961c0e 100644 --- a/gbpservice/nfp/config_orchestrator/handlers/notification/handler.py +++ 
b/gbpservice/contrib/nfp/config_orchestrator/handlers/notification/handler.py @@ -10,18 +10,19 @@ # License for the specific language governing permissions and limitations # under the License. +import sys +import traceback + +from gbpservice.contrib.nfp.config_orchestrator.common import ( + lbv2_constants as lbv2_const) +from gbpservice.contrib.nfp.config_orchestrator.common import ( + topics as a_topics) from gbpservice.nfp.common import constants as const -from gbpservice.nfp.config_orchestrator.common import lbv2_constants \ - as lbv2_const -from gbpservice.nfp.config_orchestrator.common import topics as a_topics from gbpservice.nfp.core import log as nfp_logging from gbpservice.nfp.lib import transport import oslo_messaging as messaging -import sys -import traceback - LOG = nfp_logging.getLogger(__name__) @@ -53,10 +54,11 @@ def network_function_notification(self, context, notification_data): except Exception as e: exc_type, exc_value, exc_traceback = sys.exc_info() msg = ("Generic exception (%s) while handling message (%s) : %s" - % (e, notification_data, traceback.format_exception( - exc_type, - exc_value, - exc_traceback))) + % (e, + notification_data, + traceback.format_exception(exc_type, + exc_value, + exc_traceback))) LOG.info(msg) @@ -302,8 +304,9 @@ def handle_notification(self, context, notification_data): except Exception as e: exc_type, exc_value, exc_traceback = sys.exc_info() msg = ("Generic exception (%s) while handling message (%s) : %s" - % (e, notification_data, traceback.format_exception( - exc_type, - exc_value, - exc_traceback))) + % (e, + notification_data, + traceback.format_exception(exc_type, + exc_value, + exc_traceback))) LOG.error(msg) diff --git a/gbpservice/neutron/tests/unit/nfp/configurator/api/__init__.py b/gbpservice/contrib/nfp/config_orchestrator/modules/__init__.py similarity index 100% rename from gbpservice/neutron/tests/unit/nfp/configurator/api/__init__.py rename to 
gbpservice/contrib/nfp/config_orchestrator/modules/__init__.py diff --git a/gbpservice/nfp/config_orchestrator/modules/config_orch.py b/gbpservice/contrib/nfp/config_orchestrator/modules/config_orch.py similarity index 65% rename from gbpservice/nfp/config_orchestrator/modules/config_orch.py rename to gbpservice/contrib/nfp/config_orchestrator/modules/config_orch.py index 7b8d412bb3..5df4a491ee 100644 --- a/gbpservice/nfp/config_orchestrator/modules/config_orch.py +++ b/gbpservice/contrib/nfp/config_orchestrator/modules/config_orch.py @@ -10,24 +10,20 @@ # License for the specific language governing permissions and limitations # under the License. -from gbpservice.nfp.config_orchestrator.common import topics as a_topics -from gbpservice.nfp.config_orchestrator.handlers.config import ( +from gbpservice.contrib.nfp.config_orchestrator.common import ( + topics as a_topics) +from gbpservice.contrib.nfp.config_orchestrator.handlers.config import ( firewall as fw) -from gbpservice.nfp.config_orchestrator.handlers.config import ( +from gbpservice.contrib.nfp.config_orchestrator.handlers.config import ( loadbalancer as lb) -from gbpservice.nfp.config_orchestrator.handlers.config import ( +from gbpservice.contrib.nfp.config_orchestrator.handlers.config import ( loadbalancerv2 as lbv2) -from gbpservice.nfp.config_orchestrator.handlers.config import vpn -from gbpservice.nfp.config_orchestrator.handlers.event import ( - handler as v_handler) -from gbpservice.nfp.config_orchestrator.handlers.notification import ( +from gbpservice.contrib.nfp.config_orchestrator.handlers.config import vpn +from gbpservice.contrib.nfp.config_orchestrator.handlers.notification import ( handler as notif_handler) from gbpservice.nfp.core.rpc import RpcAgent -from gbpservice.nfp.lib import transport -from neutron import context as n_context from oslo_config import cfg -import time def rpc_init(sc, conf): @@ -107,31 +103,5 @@ def rpc_init(sc, conf): sc.register_rpc_agents([fwagent, lbagent, lbv2agent, 
vpnagent, rpcagent]) -def events_init(sc, conf): - """Register event with its handler.""" - evs = v_handler.event_init(sc, conf) - sc.register_events(evs) - - def nfp_module_init(sc, conf): rpc_init(sc, conf) - events_init(sc, conf) - - -def nfp_module_post_init(sc, conf): - ev = sc.new_event(id='SERVICE_OPERATION_POLL_EVENT', - key='SERVICE_OPERATION_POLL_EVENT') - sc.post_event(ev) - - uptime = time.strftime("%c") - body = {'eventdata': {'uptime': uptime, - 'module': 'config_orchestrator'}, - 'eventid': 'NFP_UP_TIME', - 'eventtype': 'NFP_CONTROLLER'} - context = n_context.Context('config_agent_user', 'config_agent_tenant') - transport.send_request_to_configurator(conf, - context, - body, - 'CREATE', - network_function_event=True, - override_backend='tcp_rest') diff --git a/gbpservice/nfp/config_orchestrator/handlers/event/handler.py b/gbpservice/contrib/nfp/config_orchestrator/modules/enterprise.py similarity index 95% rename from gbpservice/nfp/config_orchestrator/handlers/event/handler.py rename to gbpservice/contrib/nfp/config_orchestrator/modules/enterprise.py index d1c3b40098..1c476063ca 100644 --- a/gbpservice/nfp/config_orchestrator/handlers/event/handler.py +++ b/gbpservice/contrib/nfp/config_orchestrator/modules/enterprise.py @@ -16,7 +16,7 @@ import time import traceback -from gbpservice.nfp.config_orchestrator.common import common +from gbpservice.contrib.nfp.config_orchestrator.common import common from gbpservice.nfp.core.event import Event from gbpservice.nfp.core import log as nfp_logging from gbpservice.nfp.core import module as nfp_api @@ -56,7 +56,34 @@ def event_init(sc, conf): handler=EventsHandler(sc, conf)), Event(id='SERVICE_CREATE_PENDING', handler=EventsHandler(sc, conf))] - return evs + + sc.register_events(evs) + + +def nfp_module_init(sc, conf): + event_init(sc, conf) + + +def nfp_module_post_init(sc, conf): + try: + ev = sc.new_event(id='SERVICE_OPERATION_POLL_EVENT', + key='SERVICE_OPERATION_POLL_EVENT') + sc.post_event(ev) + except 
Exception as e: + msg = ("%s" % (e)) + LOG.error(msg) + uptime = time.strftime("%c") + body = {'eventdata': {'uptime': uptime, + 'module': 'config_orchestrator'}, + 'eventid': 'NFP_UP_TIME', + 'eventtype': 'NFP_CONTROLLER'} + context = n_context.Context('config_agent_user', 'config_agent_tenant') + transport.send_request_to_configurator(conf, + context, + body, + 'CREATE', + network_function_event=True, + override_backend='tcp_rest') """Periodic Class to service events for visiblity.""" diff --git a/gbpservice/nfp/configurator/Dockerfile b/gbpservice/contrib/nfp/configurator/Dockerfile similarity index 77% rename from gbpservice/nfp/configurator/Dockerfile rename to gbpservice/contrib/nfp/configurator/Dockerfile index 12e2843a29..64724e32c1 100644 --- a/gbpservice/nfp/configurator/Dockerfile +++ b/gbpservice/contrib/nfp/configurator/Dockerfile @@ -43,25 +43,21 @@ RUN cur_dir=$PWD RUN cd /pan-python && sudo ./setup.py install RUN cd $cur_dir +# Cache buster +ADD https://www.random.org/strings/?num=10&len=8&digits=on&upperalpha=on&loweralpha=on&unique=on&format=plain&rnd=new cache-buster + RUN git clone -b stable/mitaka --single-branch https://github.com/openstack/neutron-lib.git neutron_lib RUN cp -r /neutron_lib/neutron_lib /usr/local/lib/python2.7/dist-packages/ RUN git clone -b stable/mitaka --single-branch https://github.com/openstack/neutron.git neutron RUN cp -r /neutron/neutron /usr/local/lib/python2.7/dist-packages/ - -# Openstack Repo -#RUN git clone https://github.com/openstack/group-based-policy.git group-based-policy -#RUN cd /group-based-policy && git fetch https://git.openstack.org/openstack/group-based-policy GIT-BRANCH-NAME && git checkout FETCH_HEAD - -# Oneconvergence Repo RUN git clone -b GIT-BRANCH-NAME --single-branch https://github.com/oneconvergence/group-based-policy.git group-based-policy - +# RUN git clone https://github.com/openstack/group-based-policy.git group-based-policy +# RUN cd /group-based-policy && git fetch 
https://git.openstack.org/openstack/group-based-policy GIT-BRANCH-NAME && git checkout FETCH_HEAD RUN cp -r /group-based-policy/gbpservice /usr/local/lib/python2.7/dist-packages/ RUN cp /group-based-policy/gbpservice/nfp/bin/nfp /usr/bin/ RUN chmod +x /usr/bin/nfp -RUN cp /group-based-policy/gbpservice/nfp/bin/nfp_configurator.ini /etc/ -RUN sed -i "s/log_forward_ip_address=*.*/log_forward_ip_address=VIS_VM_IP_ADDRESS/" /etc/nfp_configurator.ini -RUN cp /group-based-policy/gbpservice/nfp/bin/policy.json /etc/ -RUN cp -r /group-based-policy/gbpservice/nfp/configurator/config /etc/nfp_config +RUN cp /group-based-policy/gbpservice/contrib/nfp/bin/nfp_configurator.ini /etc/ +RUN cp /group-based-policy/gbpservice/contrib/nfp/bin/policy.json /etc/ RUN mkdir -p /var/log/nfp RUN touch /var/log/nfp/nfp_configurator.log RUN touch /var/log/nfp/nfp_pecan.log diff --git a/gbpservice/neutron/tests/unit/nfp/configurator/api/v1/__init__.py b/gbpservice/contrib/nfp/configurator/__init__.py similarity index 100% rename from gbpservice/neutron/tests/unit/nfp/configurator/api/v1/__init__.py rename to gbpservice/contrib/nfp/configurator/__init__.py diff --git a/gbpservice/neutron/tests/unit/nfp/configurator/api/v1/controllers/__init__.py b/gbpservice/contrib/nfp/configurator/advanced_controller/__init__.py similarity index 100% rename from gbpservice/neutron/tests/unit/nfp/configurator/api/v1/controllers/__init__.py rename to gbpservice/contrib/nfp/configurator/advanced_controller/__init__.py diff --git a/gbpservice/nfp/configurator/api/v1/controllers/controller.py b/gbpservice/contrib/nfp/configurator/advanced_controller/controller.py similarity index 91% rename from gbpservice/nfp/configurator/api/v1/controllers/controller.py rename to gbpservice/contrib/nfp/configurator/advanced_controller/controller.py index d24c7d92a8..db58f496aa 100644 --- a/gbpservice/nfp/configurator/api/v1/controllers/controller.py +++ b/gbpservice/contrib/nfp/configurator/advanced_controller/controller.py @@ 
-12,40 +12,38 @@ import oslo_serialization.jsonutils as jsonutils -# from neutron.agent.common import config from neutron.common import rpc as n_rpc from oslo_config import cfg from oslo_log import log as logging import oslo_messaging import pecan -from gbpservice.nfp.configurator.api.base_controller import BaseController +from gbpservice.nfp.pecan import base_controller LOG = logging.getLogger(__name__) n_rpc.init(cfg.CONF) -"""Implements all the APIs Invoked by HTTP requests. -Implements following HTTP methods. - -get - -post - -put -According to the HTTP request received from config-agent this class make -call/cast to configurator and return response to config-agent +class Controller(base_controller.BaseController): + """Implements all the APIs Invoked by HTTP requests. -""" + Implements following HTTP methods. + -get + -post + -put + According to the HTTP request received from config-agent this class make + call/cast to configurator and return response to config-agent - -class Controller(BaseController): + """ def __init__(self, method_name): try: + self.method_name = method_name self.services = pecan.conf['cloud_services'] self.rpc_routing_table = {} for service in self.services: self._entry_to_rpc_routing_table(service) - self.method_name = method_name super(Controller, self).__init__() except Exception as err: msg = ( @@ -195,18 +193,16 @@ def _format_description(self, msg): return error_data -"""Implements call/cast methods used in REST Controller. - -Implements following methods. - -call - -cast -This class send an RPC call/cast to configurator according to the data sent -by Controller class of REST server. - - """ +class RPCClient(object): + """Implements call/cast methods used in REST Controller. + Implements following methods. + -call + -cast + This class send an RPC call/cast to configurator according to the data sent + by Controller class of REST server. 
-class RPCClient(object): + """ API_VERSION = '1.0' @@ -263,12 +259,10 @@ def to_dict(self): return {} -""" CloudService keeps all information of uservice along with initialized - RPCClient object using which rpc is routed to over the cloud service. -""" - - class CloudService(object): + """ CloudService keeps all information of uservice along with initialized + RPCClient object using which rpc is routed to over the cloud service. + """ def __init__(self, **kwargs): self.service_name = kwargs.get('service_name') diff --git a/gbpservice/nfp/configurator/api/v1/controllers/__init__.py b/gbpservice/contrib/nfp/configurator/advanced_controller/controller_loader.py similarity index 96% rename from gbpservice/nfp/configurator/api/v1/controllers/__init__.py rename to gbpservice/contrib/nfp/configurator/advanced_controller/controller_loader.py index 3dd0d6079e..46e418b159 100644 --- a/gbpservice/nfp/configurator/api/v1/controllers/__init__.py +++ b/gbpservice/contrib/nfp/configurator/advanced_controller/controller_loader.py @@ -12,7 +12,8 @@ import pecan -import controller +from gbpservice.contrib.nfp.configurator.advanced_controller import ( + controller) """This class forwards HTTP request to controller class. 
diff --git a/gbpservice/neutron/tests/unit/nfp/configurator/drivers/__init__.py b/gbpservice/contrib/nfp/configurator/agents/__init__.py similarity index 100% rename from gbpservice/neutron/tests/unit/nfp/configurator/drivers/__init__.py rename to gbpservice/contrib/nfp/configurator/agents/__init__.py diff --git a/gbpservice/nfp/configurator/agents/agent_base.py b/gbpservice/contrib/nfp/configurator/agents/agent_base.py similarity index 93% rename from gbpservice/nfp/configurator/agents/agent_base.py rename to gbpservice/contrib/nfp/configurator/agents/agent_base.py index fdd509e0f7..426027d882 100644 --- a/gbpservice/nfp/configurator/agents/agent_base.py +++ b/gbpservice/contrib/nfp/configurator/agents/agent_base.py @@ -10,21 +10,21 @@ # License for the specific language governing permissions and limitations # under the License. -from gbpservice.nfp.core import module as nfp_api -from gbpservice.nfp.configurator.lib import constants as const +from gbpservice.contrib.nfp.configurator.lib import constants as const from gbpservice.nfp.core import log as nfp_logging +from gbpservice.nfp.core import module as nfp_api LOG = nfp_logging.getLogger(__name__) -"""Implements base class for all service agents. -Common methods for service agents are implemented in this class. Configurator -module invokes these methods through the service agent's child class instance. +class AgentBaseRPCManager(object): + """Implements base class for all service agents. -""" + Common methods for service agents are implemented in this class. + Configurator module invokes these methods through the service + agent's child class instance. 
- -class AgentBaseRPCManager(object): + """ def __init__(self, sc, conf): self.sc = sc @@ -68,7 +68,7 @@ def process_request(self, sa_req_list, notification_data): # In case of malformed input, send failure notification if not self.validate_request(sa_req_list, notification_data): - # TODO(JAGADISH): Need to send failure notification + # REVISIT(JAGADISH): Need to send failure notification return # Multiple request data blobs needs batch processing. Send batch @@ -109,6 +109,11 @@ def process_request(self, sa_req_list, notification_data): class AgentBaseNotification(object): + """Enqueues notification event into notification queue + + Responses from the REST calls made to the VM are fed to under the + cloud components using this notification handle. + """ def __init__(self, sc): self.sc = sc @@ -130,6 +135,9 @@ def _notification(self, data): class AgentBaseEventHandler(nfp_api.NfpEventHandler): + """ Super class for all agents to handle batch events. + + """ def __init__(self, sc, drivers, rpcmgr): self.sc = sc diff --git a/gbpservice/nfp/configurator/agents/firewall.py b/gbpservice/contrib/nfp/configurator/agents/firewall.py similarity index 93% rename from gbpservice/nfp/configurator/agents/firewall.py rename to gbpservice/contrib/nfp/configurator/agents/firewall.py index e7110c2065..78c823f2f4 100644 --- a/gbpservice/nfp/configurator/agents/firewall.py +++ b/gbpservice/contrib/nfp/configurator/agents/firewall.py @@ -14,25 +14,24 @@ import oslo_messaging as messaging import requests -from gbpservice.nfp.configurator.agents import agent_base -from gbpservice.nfp.configurator.lib import constants as common_const -from gbpservice.nfp.configurator.lib import fw_constants as const -from gbpservice.nfp.configurator.lib import utils as load_driver +from gbpservice.contrib.nfp.configurator.agents import agent_base +from gbpservice.contrib.nfp.configurator.lib import constants as common_const +from gbpservice.contrib.nfp.configurator.lib import fw_constants as const +from 
gbpservice.contrib.nfp.configurator.lib import utils as load_driver from gbpservice.nfp.core import event as nfp_event from gbpservice.nfp.core import log as nfp_logging from gbpservice.nfp.core import module as nfp_api LOG = nfp_logging.getLogger(__name__) -""" Implements Fwaas response path to Neutron plugin. -Methods of this class are invoked by the FwaasEventHandler class -for sending response from driver to the Fwaas Neutron plugin. - -""" +class FwaasRpcSender(agent_base.AgentBaseEventHandler): + """ Implements Fwaas response path to Neutron plugin. + Methods of this class are invoked by the FwaasEventHandler class + for sending response from driver to the Fwaas Neutron plugin. -class FwaasRpcSender(agent_base.AgentBaseEventHandler): + """ def __init__(self, sc, host, drivers, rpcmgr): super(FwaasRpcSender, self).__init__(sc, drivers, rpcmgr) @@ -81,16 +80,16 @@ def firewall_deleted(self, agent_info, firewall_id, firewall=None): } self.notify._notification(msg) -""" Implements FWaasRpcManager class which receives requests - from Configurator to Agent. -Methods of this class are invoked by the configurator. Events are -created according to the requests received and enqueued to worker queues. +class FWaasRpcManager(agent_base.AgentBaseRPCManager): + """ Implements FWaasRpcManager class which receives requests + from Configurator to Agent. -""" + Methods of this class are invoked by the configurator. Events are + created according to the requests received and enqueued to worker queues. + """ -class FWaasRpcManager(agent_base.AgentBaseRPCManager): RPC_API_VERSION = '1.0' target = messaging.Target(version=RPC_API_VERSION) @@ -156,15 +155,15 @@ def delete_firewall(self, context, firewall, host): self._create_event(context, firewall, host, const.FIREWALL_DELETE_EVENT) -""" Handler class which invokes firewall driver methods -Worker processes dequeue the worker queues and invokes the -appropriate handler class methods for Fwaas methods. 
+class FWaasEventHandler(nfp_api.NfpEventHandler): + """ Handler class which invokes firewall driver methods -""" + Worker processes dequeue the worker queues and invokes the + appropriate handler class methods for Fwaas methods. + """ -class FWaasEventHandler(nfp_api.NfpEventHandler): def __init__(self, sc, drivers, rpcmgr, conf): """ Instantiates class object. @@ -278,7 +277,7 @@ def invoke_driver_for_plugin_api(self, ev): try: status = self.method(context, firewall, host) except requests.ConnectionError: - # FIXME It can't be correct everytime + # REVISIT(VIKASH): It can't be correct everytime msg = ("There is a connection error for firewall %r of " "tenant %r. Assuming either there is serious " "issue with VM or data path is completely " @@ -289,7 +288,7 @@ def invoke_driver_for_plugin_api(self, ev): agent_info, firewall['id'], firewall) except Exception as err: - # TODO(VIKASH) Is it correct to raise ? As the subsequent + # REVISIT(VIKASH): Is it correct to raise ? As the subsequent # attempt to clean will only re-raise the last one.And it # can go on and on and may not be ever recovered. 
self.plugin_rpc.set_firewall_status( diff --git a/gbpservice/nfp/configurator/agents/generic_config.py b/gbpservice/contrib/nfp/configurator/agents/generic_config.py similarity index 94% rename from gbpservice/nfp/configurator/agents/generic_config.py rename to gbpservice/contrib/nfp/configurator/agents/generic_config.py index dbc00750d0..5e8bb3e59e 100644 --- a/gbpservice/nfp/configurator/agents/generic_config.py +++ b/gbpservice/contrib/nfp/configurator/agents/generic_config.py @@ -12,29 +12,29 @@ import os -from gbpservice.nfp.configurator.agents import agent_base -from gbpservice.nfp.configurator.lib import ( +from gbpservice.contrib.nfp.configurator.agents import agent_base +from gbpservice.contrib.nfp.configurator.lib import ( generic_config_constants as gen_cfg_const) -from gbpservice.nfp.configurator.lib import constants as common_const -from gbpservice.nfp.configurator.lib import utils +from gbpservice.contrib.nfp.configurator.lib import constants as common_const +from gbpservice.contrib.nfp.configurator.lib import utils from gbpservice.nfp.core import event as nfp_event -from gbpservice.nfp.core import module as nfp_api from gbpservice.nfp.core import log as nfp_logging +from gbpservice.nfp.core import module as nfp_api LOG = nfp_logging.getLogger(__name__) -"""Implements APIs invoked by configurator for processing RPC messages. -RPC client of configurator module receives RPC messages from REST server -and invokes the API of this class. The instance of this class is registered -with configurator module using register_service_agent API. Configurator module -identifies the service agent object based on service type and invokes ones of -the methods of this class to configure the device. +class GenericConfigRpcManager(agent_base.AgentBaseRPCManager): + """Implements APIs invoked by configurator for processing RPC messages. -""" + RPC client of configurator module receives RPC messages from REST server + and invokes the API of this class. 
The instance of this class is registered + with configurator module using register_service_agent API. Configurator + module identifies the service agent object based on service type and + invokes ones of the methods of this class to configure the device. + """ -class GenericConfigRpcManager(agent_base.AgentBaseRPCManager): def __init__(self, sc, conf): """Instantiates child and parent class objects. @@ -161,17 +161,15 @@ def clear_healthmonitor(self, context, resource_data): resource_data['vmid']) -"""Implements event handlers and their helper methods. - -Object of this class is registered with the event class of core service -controller. Based on the event key, handle_event method of this class is -invoked by core service controller. - -""" - - class GenericConfigEventHandler(agent_base.AgentBaseEventHandler, nfp_api.NfpEventHandler): + """Implements event handlers and their helper methods. + + Object of this class is registered with the event class of core service + controller. Based on the event key, handle_event method of this class is + invoked by core service controller. + """ + def __init__(self, sc, drivers, rpcmgr): super(GenericConfigEventHandler, self).__init__( sc, drivers, rpcmgr) diff --git a/gbpservice/nfp/configurator/agents/loadbalancer_v1.py b/gbpservice/contrib/nfp/configurator/agents/loadbalancer_v1.py similarity index 83% rename from gbpservice/nfp/configurator/agents/loadbalancer_v1.py rename to gbpservice/contrib/nfp/configurator/agents/loadbalancer_v1.py index b0f4a8ec6c..7246288199 100644 --- a/gbpservice/nfp/configurator/agents/loadbalancer_v1.py +++ b/gbpservice/contrib/nfp/configurator/agents/loadbalancer_v1.py @@ -9,39 +9,39 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
-import os -from gbpservice.nfp.configurator.agents import agent_base -from gbpservice.nfp.configurator.lib import data_filter -from gbpservice.nfp.configurator.lib import lb_constants -from gbpservice.nfp.configurator.lib import utils +from neutron import context + +from gbpservice.contrib.nfp.configurator.agents import agent_base +from gbpservice.contrib.nfp.configurator.lib import data_filter +from gbpservice.contrib.nfp.configurator.lib import lb_constants +from gbpservice.contrib.nfp.configurator.lib import utils from gbpservice.nfp.core import event as nfp_event -from gbpservice.nfp.core import module as nfp_api from gbpservice.nfp.core import log as nfp_logging - -from neutron import context +from gbpservice.nfp.core import module as nfp_api LOG = nfp_logging.getLogger(__name__) -""" Implements LBaaS response path to Neutron plugin. -Methods of this class are invoked by the LBaasEventHandler class and also -by driver class for sending response from driver to the LBaaS Neutron plugin. -""" - class LBaasRpcSender(data_filter.Filter): + """Implements LBaaS response path to Neutron plugin. + + Methods of this class are invoked by LBaasEventHandler class + and also by driver class for sending response from driver to + the LBaaS Neutron plugin. + """ def __init__(self, sc): self.notify = agent_base.AgentBaseNotification(sc) def get_logical_device(self, pool_id, context): """ Calls data filter library to get logical device from pool_id. - :param pool_id: object type - :param context: context which has list of all pool related resources - belonging to that tenant + :param pool_id: Neutron LBaaS pool id + :param context: RPC context Returns: logical_device + """ return self.call( context, @@ -54,9 +54,14 @@ def get_logical_device(self, pool_id, context): def update_status(self, obj_type, obj_id, status, agent_info, obj=None): """ Enqueues the response from LBaaS operation to neutron plugin. 
- :param obj_type: object type - :param obj_id: object id - :param status: status of the object to be set + :param obj_type: Neutron LBaaS object type + :param obj_id: Neutron LBaaS object id + :param status: Neutron LBaaS object status to be set + :param agent_info: Agent info which carries context which is needed + in config_orch to send response back to *aaS plugin + :param obj: Neutron LBaaS object + + Returns: None """ @@ -74,9 +79,11 @@ def update_status(self, obj_type, obj_id, status, agent_info, obj=None): def update_pool_stats(self, pool_id, stats, context, pool=None): """ Enqueues the response from LBaaS operation to neutron plugin. - :param pool_id: pool id + :param pool_id: Neutron LBaaS pool id :param stats: statistics of that pool + :param context: RPC context + Returns: None """ msg = {'info': {'service_type': lb_constants.SERVICE_TYPE, 'context': context.to_dict()}, @@ -92,10 +99,12 @@ def update_pool_stats(self, pool_id, stats, context, pool=None): def vip_deleted(self, vip, status, agent_info): """ Enqueues the response from LBaaS operation to neutron plugin. - :param vip: object type - :param vip_id: object id - :param status: status of the object to be set + :param vip: Neutron LBaaS vip resource + :param status: Neutron LBaaS vip resource status + :param agent_info: Agent info which carries context which is needed + in config_orch to send response back to *aaS plugin + Returns: None """ msg = {'info': {'service_type': lb_constants.SERVICE_TYPE, 'context': agent_info['context']}, @@ -107,18 +116,17 @@ def vip_deleted(self, vip, status, agent_info): } self.notify._notification(msg) -"""Implements APIs invoked by configurator for processing RPC messages. -RPC client of configurator module receives RPC messages from REST server -and invokes the API of this class. The instance of this class is registered -with configurator module using register_service_agent API. 
Configurator module -identifies the service agent object based on service type and invokes ones of -the methods of this class to configure the device. - -""" +class LBaaSRpcManager(agent_base.AgentBaseRPCManager): + """Implements APIs invoked by configurator for processing RPC messages. + RPC client of configurator module receives RPC messages from REST server + and invokes the API of this class. The instance of this class is registered + with configurator module using register_service_agent API. Configurator + module identifies the service agent object based on service type and + invokes one of the methods of this class to configure the device. -class LBaaSRpcManager(agent_base.AgentBaseRPCManager): + """ def __init__(self, sc, conf): """Instantiates child and parent class objects. @@ -137,16 +145,18 @@ def _send_event(self, event_id, data, serialize=False, binding_key=None, """Posts an event to framework. :param event_id: Unique identifier for the event - :param event_key: Event key for serialization - :param serialize: Serialize the event + :param data: event data + :param serialize: boolean value used to serialize the event :param binding_key: binding key to be used for serialization :param key: event key + Returns: None + """ ev = self.sc.new_event(id=event_id, data=data) ev.key = key - ev.sequence = serialize + ev.serialize = serialize ev.binding_key = binding_key self.sc.post_event(ev) @@ -373,17 +383,15 @@ def agent_updated(self, context, payload): self._send_event(lb_constants.EVENT_AGENT_UPDATED, arg_dict) -"""Implements event handlers and their helper methods. - -Object of this class is registered with the event class of core service -controller. Based on the event key, handle_event method of this class is -invoked by core service controller. - -""" - - class LBaaSEventHandler(agent_base.AgentBaseEventHandler, nfp_api.NfpEventHandler): + """Implements event handlers and their helper methods. 
+ + Object of this class is registered with the event class of core service + controller. Based on the event key, handle_event method of this class is + invoked by core service controller. + + """ instance_mapping = {} def __init__(self, sc, drivers, rpcmgr): @@ -392,33 +400,30 @@ def __init__(self, sc, drivers, rpcmgr): self.rpcmgr = rpcmgr self.plugin_rpc = LBaasRpcSender(sc) - """TODO(pritam): Remove neutron context dependency. As of now because - config agent needs context in notification, and internal poll event - like collect_stats() does not have context, creating context here, - but should get rid of this in future. + """REVISIT (pritam): + Remove neutron context dependency. As of now + because config agent needs context in notification, and internal + poll event like collect_stats() does not have context, creating + context here, but should get rid of this in future. """ self.context = context.get_admin_context_without_session() - def _get_driver(self, driver_name): - """Retrieves service driver object based on service type input. - - Currently, service drivers are identified with service type. Support - for single driver per service type is provided. When multi-vendor - support is going to be provided, the driver should be selected based - on both service type and vendor name. + def _get_driver(self, service_vendor): + """Retrieves service driver instance based on service type + and service vendor. - :param service_type: Service type - loadbalancer + :param service_vendor: service vendor Returns: Service driver instance """ - driver = lb_constants.SERVICE_TYPE + driver_name + driver = lb_constants.SERVICE_TYPE + service_vendor return self.drivers[driver] def handle_event(self, ev): - """Processes the generated events in worker context. + """Processes generated events in worker context. - Processes the following events. + Processes following events. 
- create pool - update pool - delete pool @@ -437,28 +442,23 @@ def handle_event(self, ev): Returns: None """ - msg = ("Handling event=%s" % (ev.id)) + msg = ("Starting handling event %s" % (ev.id)) LOG.info(msg) try: - msg = ("Worker process with ID: %s starting " - "to handle task: %s of topic: %s. " - % (os.getpid(), ev.id, lb_constants.LBAAS_AGENT_RPC_TOPIC)) - LOG.debug(msg) - method = getattr(self, "_%s" % (ev.id.lower())) method(ev) except Exception as err: - msg = ("Failed to perform the operation: %s. %s" + msg = ("Failed to handle event %s. Reason is %s" % (ev.id, str(err).capitalize())) LOG.error(msg) finally: if ev.id == lb_constants.EVENT_COLLECT_STATS: - """Do not say event done for collect stats as it is + """Do not say event done for this event as it is to be executed forever """ pass else: - msg = ("Calling event done for event=%s" % (ev.id)) + msg = ("Successfully handled event %s" % (ev.id)) LOG.info(msg) self.sc.event_complete(ev) @@ -471,18 +471,18 @@ def _handle_event_vip(self, ev, operation): driver = self._get_driver(service_vendor) try: - if operation == 'create': + if operation == lb_constants.CREATE: driver.create_vip(vip, context) - elif operation == 'update': + elif operation == lb_constants.UPDATE: old_vip = data['old_vip'] driver.update_vip(old_vip, vip, context) - elif operation == 'delete': + elif operation == lb_constants.DELETE: driver.delete_vip(vip, context) self.plugin_rpc.vip_deleted(vip, lb_constants.ACTIVE, agent_info) return # Don't update object status for delete operation except Exception: - if operation == 'delete': + if operation == lb_constants.DELETE: msg = ("Failed to delete vip %s" % (vip['id'])) self.plugin_rpc.vip_deleted(vip, lb_constants.ACTIVE, agent_info) @@ -497,13 +497,13 @@ def _handle_event_vip(self, ev, operation): agent_info, vip) def _create_vip(self, ev): - self._handle_event_vip(ev, 'create') + self._handle_event_vip(ev, lb_constants.CREATE) def _update_vip(self, ev): - self._handle_event_vip(ev, 
'update') + self._handle_event_vip(ev, lb_constants.UPDATE) def _delete_vip(self, ev): - self._handle_event_vip(ev, 'delete') + self._handle_event_vip(ev, lb_constants.DELETE) def _handle_event_pool(self, ev, operation): data = ev.data @@ -512,7 +512,7 @@ def _handle_event_pool(self, ev, operation): agent_info = context.pop('agent_info') service_vendor = agent_info['service_vendor'] try: - if operation == 'create': + if operation == lb_constants.CREATE: driver_name = data['driver_name'] driver_id = driver_name + service_vendor if (driver_id) not in self.drivers.keys(): @@ -525,17 +525,17 @@ def _handle_event_pool(self, ev, operation): driver = self.drivers[driver_id] driver.create_pool(pool, context) LBaaSEventHandler.instance_mapping[pool['id']] = driver_name - elif operation == 'update': + elif operation == lb_constants.UPDATE: old_pool = data['old_pool'] driver = self._get_driver(service_vendor) # pool['id']) driver.update_pool(old_pool, pool, context) - elif operation == 'delete': + elif operation == lb_constants.DELETE: driver = self._get_driver(service_vendor) # pool['id']) driver.delete_pool(pool, context) del LBaaSEventHandler.instance_mapping[pool['id']] return # Don't update object status for delete operation except Exception: - if operation == 'delete': + if operation == lb_constants.DELETE: msg = ("Failed to delete pool %s" % (pool['id'])) LOG.warn(msg) del LBaaSEventHandler.instance_mapping[pool['id']] @@ -549,13 +549,13 @@ def _handle_event_pool(self, ev, operation): agent_info, pool) def _create_pool(self, ev): - self._handle_event_pool(ev, 'create') + self._handle_event_pool(ev, lb_constants.CREATE) def _update_pool(self, ev): - self._handle_event_pool(ev, 'update') + self._handle_event_pool(ev, lb_constants.UPDATE) def _delete_pool(self, ev): - self._handle_event_pool(ev, 'delete') + self._handle_event_pool(ev, lb_constants.DELETE) def _handle_event_member(self, ev, operation): data = ev.data @@ -565,16 +565,16 @@ def _handle_event_member(self, ev, 
operation): service_vendor = agent_info['service_vendor'] driver = self._get_driver(service_vendor) # member['pool_id']) try: - if operation == 'create': + if operation == lb_constants.CREATE: driver.create_member(member, context) - elif operation == 'update': + elif operation == lb_constants.UPDATE: old_member = data['old_member'] driver.update_member(old_member, member, context) - elif operation == 'delete': + elif operation == lb_constants.DELETE: driver.delete_member(member, context) return # Don't update object status for delete operation except Exception: - if operation == 'delete': + if operation == lb_constants.DELETE: msg = ("Failed to delete member %s" % (member['id'])) LOG.warn(msg) else: @@ -587,13 +587,13 @@ def _handle_event_member(self, ev, operation): agent_info, member) def _create_member(self, ev): - self._handle_event_member(ev, 'create') + self._handle_event_member(ev, lb_constants.CREATE) def _update_member(self, ev): - self._handle_event_member(ev, 'update') + self._handle_event_member(ev, lb_constants.UPDATE) def _delete_member(self, ev): - self._handle_event_member(ev, 'delete') + self._handle_event_member(ev, lb_constants.DELETE) def _handle_event_pool_health_monitor(self, ev, operation): data = ev.data @@ -606,20 +606,20 @@ def _handle_event_pool_health_monitor(self, ev, operation): assoc_id = {'pool_id': pool_id, 'monitor_id': health_monitor['id']} try: - if operation == 'create': + if operation == lb_constants.CREATE: driver.create_pool_health_monitor(health_monitor, pool_id, context) - elif operation == 'update': + elif operation == lb_constants.UPDATE: old_health_monitor = data['old_health_monitor'] driver.update_pool_health_monitor(old_health_monitor, health_monitor, pool_id, context) - elif operation == 'delete': + elif operation == lb_constants.DELETE: driver.delete_pool_health_monitor(health_monitor, pool_id, context) return # Don't update object status for delete operation except Exception: - if operation == 'delete': + if 
operation == lb_constants.DELETE: msg = ("Failed to delete pool health monitor." " assoc_id: %s" % (assoc_id)) LOG.warn(msg) @@ -633,23 +633,23 @@ def _handle_event_pool_health_monitor(self, ev, operation): agent_info, health_monitor) def _create_pool_health_monitor(self, ev): - self._handle_event_pool_health_monitor(ev, 'create') + self._handle_event_pool_health_monitor(ev, lb_constants.CREATE) def _update_pool_health_monitor(self, ev): - self._handle_event_pool_health_monitor(ev, 'update') + self._handle_event_pool_health_monitor(ev, lb_constants.UPDATE) def _delete_pool_health_monitor(self, ev): - self._handle_event_pool_health_monitor(ev, 'delete') + self._handle_event_pool_health_monitor(ev, lb_constants.DELETE) def _agent_updated(self, ev): - """ TODO:(pritam): Support """ + """ REVISIT(pritam): Implement this method """ return None def _collect_stats(self, ev): self.sc.poll_event(ev) @nfp_api.poll_event_desc(event=lb_constants.EVENT_COLLECT_STATS, - spacing=60) + spacing=60) def collect_stats(self, ev): for pool_id, driver_name in LBaaSEventHandler.instance_mapping.items(): driver_id = lb_constants.SERVICE_TYPE + driver_name @@ -734,7 +734,7 @@ def register_service_agent(cm, sc, conf, rpcmgr): """ - service_type = 'loadbalancer' # lb_constants.SERVICE_TYPE + service_type = lb_constants.SERVICE_TYPE cm.register_service_agent(service_type, rpcmgr) @@ -750,7 +750,7 @@ def init_agent(cm, sc, conf): try: drivers = load_drivers(sc, conf) except Exception as err: - msg = ("Loadbalaner agent failed to load service drivers. %s" + msg = ("Loadbalaner agent failed to load service drivers. Reason:%s" % (str(err).capitalize())) LOG.error(msg) raise err @@ -764,7 +764,7 @@ def init_agent(cm, sc, conf): try: events_init(sc, drivers, rpcmgr) except Exception as err: - msg = ("Loadbalaner agent failed to initialize events. %s" + msg = ("Loadbalaner agent failed to initialize events. 
Reason:%s" % (str(err).capitalize())) LOG.error(msg) raise err @@ -777,7 +777,7 @@ def init_agent(cm, sc, conf): register_service_agent(cm, sc, conf, rpcmgr) except Exception as err: msg = ("Failed to register Loadbalaner agent with" - " configurator module. %s" % (str(err).capitalize())) + " configurator module. Reason:%s" % (str(err).capitalize())) LOG.error(msg) raise err else: diff --git a/gbpservice/nfp/configurator/agents/loadbalancer_v2.py b/gbpservice/contrib/nfp/configurator/agents/loadbalancer_v2.py similarity index 99% rename from gbpservice/nfp/configurator/agents/loadbalancer_v2.py rename to gbpservice/contrib/nfp/configurator/agents/loadbalancer_v2.py index 2dd5019d07..1bcaf4c06c 100644 --- a/gbpservice/nfp/configurator/agents/loadbalancer_v2.py +++ b/gbpservice/contrib/nfp/configurator/agents/loadbalancer_v2.py @@ -13,10 +13,10 @@ import os from gbpservice.nfp.common import exceptions -from gbpservice.nfp.configurator.agents import agent_base -from gbpservice.nfp.configurator.lib import data_filter -from gbpservice.nfp.configurator.lib import lbv2_constants as lb_const -from gbpservice.nfp.configurator.lib import utils +from gbpservice.contrib.nfp.configurator.agents import agent_base +from gbpservice.contrib.nfp.configurator.lib import data_filter +from gbpservice.contrib.nfp.configurator.lib import lbv2_constants as lb_const +from gbpservice.contrib.nfp.configurator.lib import utils from gbpservice.nfp.core import event as nfp_event from gbpservice.nfp.core import log as nfp_logging from gbpservice.nfp.core import module as nfp_api diff --git a/gbpservice/nfp/configurator/agents/nfp_service.py b/gbpservice/contrib/nfp/configurator/agents/nfp_service.py similarity index 91% rename from gbpservice/nfp/configurator/agents/nfp_service.py rename to gbpservice/contrib/nfp/configurator/agents/nfp_service.py index 945b293f6d..3f35b23bb3 100644 --- a/gbpservice/nfp/configurator/agents/nfp_service.py +++ 
b/gbpservice/contrib/nfp/configurator/agents/nfp_service.py @@ -13,24 +13,25 @@ import os import oslo_messaging as messaging -from gbpservice.nfp.configurator.agents import agent_base -from gbpservice.nfp.configurator.lib import nfp_service_constants as const -from gbpservice.nfp.configurator.lib import utils as load_driver +from gbpservice.contrib.nfp.configurator.agents import agent_base +from gbpservice.contrib.nfp.configurator.lib import ( + nfp_service_constants as const) +from gbpservice.contrib.nfp.configurator.lib import utils as load_driver from gbpservice.nfp.core import event as nfp_event from gbpservice.nfp.core import log as nfp_logging LOG = nfp_logging.getLogger(__name__) -""" Implements ConfigScriptRpcManager class which receives requests - from Configurator module. -Methods of this class are invoked by the configurator. Events are -created according to the requests received and enqueued to worker queues. +class ConfigScriptRpcManager(agent_base.AgentBaseRPCManager): + """ Implements ConfigScriptRpcManager class which receives requests + from Configurator module. -""" + Methods of this class are invoked by the configurator. Events are + created according to the requests received and enqueued to worker queues. + """ -class ConfigScriptRpcManager(agent_base.AgentBaseRPCManager): RPC_API_VERSION = '1.0' target = messaging.Target(version=RPC_API_VERSION) @@ -63,15 +64,14 @@ def run_nfp_service(self, context, resource_data): data=arg_dict, key=None) self.sc.post_event(ev) -""" Handler class which invokes nfp_service driver methods - -Worker processes dequeue the worker queues and invokes the -appropriate handler class methods for ConfigScript methods. -""" +class ConfigScriptEventHandler(agent_base.AgentBaseEventHandler): + """ Handler class which invokes nfp_service driver methods + Worker processes dequeue the worker queues and invokes the + appropriate handler class methods for ConfigScript methods. 
-class ConfigScriptEventHandler(agent_base.AgentBaseEventHandler): + """ def __init__(self, sc, drivers, rpcmgr): """ Initializes parent and child class objects. diff --git a/gbpservice/nfp/configurator/agents/vpn.py b/gbpservice/contrib/nfp/configurator/agents/vpn.py similarity index 93% rename from gbpservice/nfp/configurator/agents/vpn.py rename to gbpservice/contrib/nfp/configurator/agents/vpn.py index 6b9e6b302f..85c706b723 100644 --- a/gbpservice/nfp/configurator/agents/vpn.py +++ b/gbpservice/contrib/nfp/configurator/agents/vpn.py @@ -13,15 +13,14 @@ import os -from gbpservice.nfp.configurator.agents import agent_base -from gbpservice.nfp.configurator.drivers.base import base_driver -from gbpservice.nfp.configurator.lib import data_filter -from gbpservice.nfp.configurator.lib import utils -from gbpservice.nfp.configurator.lib import vpn_constants as const -from gbpservice.nfp.core import controller as main -from gbpservice.nfp.core.event import Event -from gbpservice.nfp.core import module as nfp_api +from gbpservice.contrib.nfp.configurator.agents import agent_base +from gbpservice.contrib.nfp.configurator.drivers.base import base_driver +from gbpservice.contrib.nfp.configurator.lib import data_filter +from gbpservice.contrib.nfp.configurator.lib import utils +from gbpservice.contrib.nfp.configurator.lib import vpn_constants as const +from gbpservice.nfp.core import event as nfp_event from gbpservice.nfp.core import log as nfp_logging +from gbpservice.nfp.core import module as nfp_api import oslo_messaging as messaging @@ -145,7 +144,7 @@ def __init__(self, conf, sc): """ - super(VPNaasRpcManager, self).__init__(conf, sc) + super(VPNaasRpcManager, self).__init__(sc, conf) def vpnservice_updated(self, context, **resource_data): """Registers the VPNaas plugin events to update the vpn configurations. 
@@ -181,9 +180,9 @@ def __init__(self, sc, drivers): self._drivers = drivers self._plugin_rpc = VpnaasRpcSender(self._sc) - def _get_driver(self): + def _get_driver(self, service_vendor): - driver_id = const.SERVICE_TYPE + const.SERVICE_VENDOR + driver_id = const.SERVICE_TYPE + service_vendor return self._drivers[driver_id] def handle_event(self, ev): @@ -204,15 +203,15 @@ def handle_event(self, ev): % (os.getpid(), ev.id, const.VPN_GENERIC_CONFIG_RPC_TOPIC)) LOG.debug(msg) - - driver = self._get_driver() + service_vendor = ( + ev.data['context']['agent_info']['service_vendor']) + driver = self._get_driver(service_vendor) + setattr(VPNaasEventHandler, "service_driver", driver) self._vpnservice_updated(ev, driver) except Exception as err: msg = ("Failed to perform the operation: %s. %s" % (ev.id, str(err).capitalize())) LOG.error(msg) - finally: - self._sc.event_done(ev) def _vpnservice_updated(self, ev, driver): """ @@ -278,9 +277,8 @@ def _sync_ipsec_conns(self, context, svc_context): Returns: None """ try: - self._get_driver() - return self._get_driver().check_status(context, svc_context) + return self.service_driver.check_status(context, svc_context) except Exception as err: msg = ("Failed to sync ipsec connection information. %s." 
% str(err).capitalize()) @@ -316,9 +314,9 @@ def events_init(sc, drivers): Returns: None """ evs = [ - Event(id='VPNSERVICE_UPDATED', + nfp_event.Event(id='VPNSERVICE_UPDATED', handler=VPNaasEventHandler(sc, drivers)), - Event(id='VPN_SYNC', + nfp_event.Event(id='VPN_SYNC', handler=VPNaasEventHandler(sc, drivers))] sc.register_events(evs) @@ -356,7 +354,7 @@ def register_service_agent(cm, sc, conf): Returns: None """ - rpcmgr = VPNaasRpcManager(sc, conf) + rpcmgr = VPNaasRpcManager(conf, sc) cm.register_service_agent(const.SERVICE_TYPE, rpcmgr) diff --git a/gbpservice/neutron/tests/unit/nfp/configurator/drivers/firewall/__init__.py b/gbpservice/contrib/nfp/configurator/config/__init__.py similarity index 100% rename from gbpservice/neutron/tests/unit/nfp/configurator/drivers/firewall/__init__.py rename to gbpservice/contrib/nfp/configurator/config/__init__.py diff --git a/gbpservice/nfp/configurator/config/asav.conf b/gbpservice/contrib/nfp/configurator/config/asav.conf similarity index 100% rename from gbpservice/nfp/configurator/config/asav.conf rename to gbpservice/contrib/nfp/configurator/config/asav.conf diff --git a/gbpservice/neutron/tests/unit/nfp/configurator/drivers/loadbalancer/__init__.py b/gbpservice/contrib/nfp/configurator/drivers/__init__.py similarity index 100% rename from gbpservice/neutron/tests/unit/nfp/configurator/drivers/loadbalancer/__init__.py rename to gbpservice/contrib/nfp/configurator/drivers/__init__.py diff --git a/gbpservice/neutron/tests/unit/nfp/configurator/drivers/vpn/__init__.py b/gbpservice/contrib/nfp/configurator/drivers/base/__init__.py similarity index 100% rename from gbpservice/neutron/tests/unit/nfp/configurator/drivers/vpn/__init__.py rename to gbpservice/contrib/nfp/configurator/drivers/base/__init__.py diff --git a/gbpservice/nfp/configurator/drivers/base/base_driver.py b/gbpservice/contrib/nfp/configurator/drivers/base/base_driver.py similarity index 93% rename from 
gbpservice/nfp/configurator/drivers/base/base_driver.py rename to gbpservice/contrib/nfp/configurator/drivers/base/base_driver.py index 5962a2b092..dab884d476 100644 --- a/gbpservice/nfp/configurator/drivers/base/base_driver.py +++ b/gbpservice/contrib/nfp/configurator/drivers/base/base_driver.py @@ -15,18 +15,19 @@ from oslo_serialization import jsonutils -from gbpservice.nfp.configurator.lib import constants as const +from gbpservice.contrib.nfp.configurator.lib import constants as const from gbpservice.nfp.core import log as nfp_logging LOG = nfp_logging.getLogger(__name__) -"""Every service vendor must inherit this class. If any service vendor wants - to add extra methods for their service, apart from below given, they should - add method definition here and implement the method in their driver -""" - class BaseDriver(object): + """ Implements common functions for drivers. + + Every service vendor must inherit this class. If any service vendor wants + to add extra methods for their service, apart from below given, they should + add method definition here and implement the method in their driver + """ def __init__(self, conf): pass @@ -120,6 +121,7 @@ def _configure_log_forwarding(self, url, mgmt_ip, port): msg = ("Initiating POST request to configure log forwarding " "for service at: %r" % mgmt_ip) LOG.info(msg) + try: resp = requests.post(url, data, timeout=self.timeout) except requests.exceptions.ConnectionError as err: diff --git a/gbpservice/neutron/tests/unit/nfp/configurator/drivers/vpn/vyos/__init__.py b/gbpservice/contrib/nfp/configurator/drivers/firewall/__init__.py similarity index 100% rename from gbpservice/neutron/tests/unit/nfp/configurator/drivers/vpn/vyos/__init__.py rename to gbpservice/contrib/nfp/configurator/drivers/firewall/__init__.py diff --git a/gbpservice/neutron/tests/unit/nfp/configurator/lib/__init__.py b/gbpservice/contrib/nfp/configurator/drivers/firewall/asav/__init__.py similarity index 100% rename from 
gbpservice/neutron/tests/unit/nfp/configurator/lib/__init__.py rename to gbpservice/contrib/nfp/configurator/drivers/firewall/asav/__init__.py diff --git a/gbpservice/nfp/configurator/drivers/firewall/asav/asav_fw_constants.py b/gbpservice/contrib/nfp/configurator/drivers/firewall/asav/asav_fw_constants.py similarity index 100% rename from gbpservice/nfp/configurator/drivers/firewall/asav/asav_fw_constants.py rename to gbpservice/contrib/nfp/configurator/drivers/firewall/asav/asav_fw_constants.py diff --git a/gbpservice/nfp/configurator/drivers/firewall/asav/asav_fw_driver.py b/gbpservice/contrib/nfp/configurator/drivers/firewall/asav/asav_fw_driver.py similarity index 98% rename from gbpservice/nfp/configurator/drivers/firewall/asav/asav_fw_driver.py rename to gbpservice/contrib/nfp/configurator/drivers/firewall/asav/asav_fw_driver.py index 3a7dbe4a0d..74e95248a3 100644 --- a/gbpservice/nfp/configurator/drivers/firewall/asav/asav_fw_driver.py +++ b/gbpservice/contrib/nfp/configurator/drivers/firewall/asav/asav_fw_driver.py @@ -22,16 +22,16 @@ from requests.auth import HTTPBasicAuth -from gbpservice.nfp.configurator.drivers.base import base_driver -from gbpservice.nfp.configurator.drivers.firewall.asav import ( +from gbpservice.contrib.nfp.configurator.drivers.base import base_driver +from gbpservice.contrib.nfp.configurator.drivers.firewall.asav import ( asav_fw_constants as const) -from gbpservice.nfp.configurator.lib import constants as common_const -from gbpservice.nfp.configurator.lib import fw_constants as fw_const +from gbpservice.contrib.nfp.configurator.lib import constants as common_const +from gbpservice.contrib.nfp.configurator.lib import fw_constants as fw_const from gbpservice.nfp.core import log as nfp_logging LOG = nfp_logging.getLogger(__name__) -TIMEOUT = 20 +TIMEOUT = 40 asav_auth_opts = [ cfg.StrOpt( @@ -854,11 +854,12 @@ def create_firewall(self, context, firewall, host): msg = ("Failed to configure ASAv Firewall. 
Reason: %r" % result) LOG.error(msg) + return result else: self.save_config(mgmt_ip, firewall['id']) msg = ("Configured ASAv Firewall.") LOG.info(msg) - return result + return common_const.STATUS_ACTIVE except Exception as err: msg = ("Failed to configure firewall. Error: %r" % err) LOG.error(msg) diff --git a/gbpservice/neutron/tests/unit/nfp/configurator/modules/__init__.py b/gbpservice/contrib/nfp/configurator/drivers/firewall/paloalto/__init__.py similarity index 100% rename from gbpservice/neutron/tests/unit/nfp/configurator/modules/__init__.py rename to gbpservice/contrib/nfp/configurator/drivers/firewall/paloalto/__init__.py diff --git a/gbpservice/nfp/configurator/drivers/firewall/paloalto/paloalto_fw_constants.py b/gbpservice/contrib/nfp/configurator/drivers/firewall/paloalto/paloalto_fw_constants.py similarity index 100% rename from gbpservice/nfp/configurator/drivers/firewall/paloalto/paloalto_fw_constants.py rename to gbpservice/contrib/nfp/configurator/drivers/firewall/paloalto/paloalto_fw_constants.py diff --git a/gbpservice/nfp/configurator/drivers/firewall/paloalto/paloalto_fw_driver.py b/gbpservice/contrib/nfp/configurator/drivers/firewall/paloalto/paloalto_fw_driver.py similarity index 99% rename from gbpservice/nfp/configurator/drivers/firewall/paloalto/paloalto_fw_driver.py rename to gbpservice/contrib/nfp/configurator/drivers/firewall/paloalto/paloalto_fw_driver.py index b8c06622f9..11824d6c9f 100644 --- a/gbpservice/nfp/configurator/drivers/firewall/paloalto/paloalto_fw_driver.py +++ b/gbpservice/contrib/nfp/configurator/drivers/firewall/paloalto/paloalto_fw_driver.py @@ -17,11 +17,11 @@ from oslo_serialization import jsonutils -from gbpservice.nfp.configurator.drivers.base import base_driver -from gbpservice.nfp.configurator.drivers.firewall.paloalto import ( +from gbpservice.contrib.nfp.configurator.drivers.base import base_driver +from gbpservice.contrib.nfp.configurator.drivers.firewall.paloalto import ( paloalto_fw_constants as const) 
-from gbpservice.nfp.configurator.lib import constants as common_const -from gbpservice.nfp.configurator.lib import fw_constants as fw_const +from gbpservice.contrib.nfp.configurator.lib import constants as common_const +from gbpservice.contrib.nfp.configurator.lib import fw_constants as fw_const import sys import json diff --git a/gbpservice/neutron/tests/unit/nfp/configurator/test_data/__init__.py b/gbpservice/contrib/nfp/configurator/drivers/firewall/vyos/__init__.py similarity index 100% rename from gbpservice/neutron/tests/unit/nfp/configurator/test_data/__init__.py rename to gbpservice/contrib/nfp/configurator/drivers/firewall/vyos/__init__.py diff --git a/gbpservice/nfp/configurator/drivers/firewall/vyos/vyos_fw_constants.py b/gbpservice/contrib/nfp/configurator/drivers/firewall/vyos/vyos_fw_constants.py similarity index 100% rename from gbpservice/nfp/configurator/drivers/firewall/vyos/vyos_fw_constants.py rename to gbpservice/contrib/nfp/configurator/drivers/firewall/vyos/vyos_fw_constants.py diff --git a/gbpservice/contrib/nfp/configurator/drivers/firewall/vyos/vyos_fw_driver.py b/gbpservice/contrib/nfp/configurator/drivers/firewall/vyos/vyos_fw_driver.py new file mode 100644 index 0000000000..5f7750ca10 --- /dev/null +++ b/gbpservice/contrib/nfp/configurator/drivers/firewall/vyos/vyos_fw_driver.py @@ -0,0 +1,618 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import ast +import requests + +from gbpservice.nfp.core import log as nfp_logging + +from oslo_serialization import jsonutils + +from gbpservice.contrib.nfp.configurator.drivers.base import base_driver +from gbpservice.contrib.nfp.configurator.drivers.firewall.vyos import ( + vyos_fw_constants as const) +from gbpservice.contrib.nfp.configurator.lib import constants as common_const +from gbpservice.contrib.nfp.configurator.lib import fw_constants as fw_const + +LOG = nfp_logging.getLogger(__name__) + + +class RestApi(object): + """ Issues REST calls to the Service VMs + + REST API wrapper class that provides POST method to + communicate with the Service VM. + + """ + + def __init__(self, timeout): + self.timeout = timeout + + def request_type_to_api_map(self, url, data, request_type): + return getattr(requests, request_type)(url, + data=data, timeout=self.timeout) + + def fire(self, url, data, request_type): + """ Invokes REST POST call to the Service VM. + + :param url: URL to connect. + :param data: data to be sent. + :param request_type: POST/PUT/DELETE + + Returns: SUCCESS/Error message + + """ + + try: + resp = self.request_type_to_api_map(url, + data, request_type.lower()) + except requests.exceptions.ConnectionError as err: + msg = ("Failed to establish connection to the service at URL: %r. " + "ERROR: %r" % (url, str(err).capitalize())) + return msg + except Exception as err: + msg = ("Failed to issue %r call " + "to service. URL: %r, Data: %r. Error: %r" % + (request_type.upper(), url, data, str(err).capitalize())) + return msg + + try: + result = resp.json() + except ValueError as err: + msg = ("Unable to parse response, invalid JSON. URL: " + "%r. %r" % (url, str(err).capitalize())) + return msg + if resp.status_code not in common_const.SUCCESS_CODES or ( + result.get('status') is False): + return result + return common_const.STATUS_SUCCESS + + +class FwGenericConfigDriver(base_driver.BaseDriver): + """ Implements device configuration requests. 
+ + Firewall generic configuration driver for handling device + configuration requests from Orchestrator. + """ + + def __init__(self): + pass + + def _configure_static_ips(self, resource_data): + """ Configure static IPs for provider and stitching interfaces + of service VM. + + Issues REST call to service VM for configuration of static IPs. + + :param resource_data: a dictionary of firewall rules and objects + send by neutron plugin + + Returns: SUCCESS/Failure message with reason. + + """ + + static_ips_info = dict( + provider_ip=resource_data.get('provider_ip'), + provider_cidr=resource_data.get('provider_cidr'), + provider_mac=resource_data.get('provider_mac'), + stitching_ip=resource_data.get('stitching_ip'), + stitching_cidr=resource_data.get('stitching_cidr'), + stitching_mac=resource_data.get('stitching_mac'), + provider_interface_position=resource_data.get( + 'provider_interface_index'), + stitching_interface_position=resource_data.get( + 'stitching_interface_index')) + mgmt_ip = resource_data['mgmt_ip'] + + url = const.request_url % (mgmt_ip, + self.port, + 'add_static_ip') + data = jsonutils.dumps(static_ips_info) + + msg = ("Initiating POST request to add static IPs for primary " + "service at: %r" % mgmt_ip) + LOG.info(msg) + + err_msg = ("Static IP POST request to the VyOS firewall " + "service at %s failed. " % url) + try: + resp = self.rest_api.fire(url, data, common_const.POST) + except Exception as err: + err_msg += ("Reason: %r" % str(err).capitalize()) + LOG.error(err_msg) + return err_msg + + if resp is common_const.STATUS_SUCCESS: + msg = ("Static IPs successfully added for service at %r." % url) + LOG.info(msg) + return resp + + err_msg += (("Status code: %r, Reason: %r" % + (resp['status'], resp['reason'])) + if type(resp) is dict + else ("Reason: " + resp)) + LOG.error(err_msg) + return err_msg + + def configure_interfaces(self, context, resource_data): + """ Configure interfaces for the service VM. 
+ + Calls static IP configuration function and implements + persistent rule addition in the service VM. + Issues REST call to service VM for configuration of interfaces. + + :param context: neutron context + :param resource_data: a dictionary of firewall rules and objects + send by neutron plugin + + Returns: SUCCESS/Failure message with reason. + + """ + + mgmt_ip = resource_data['mgmt_ip'] + + try: + result_log_forward = self._configure_log_forwarding( + const.request_url, mgmt_ip, self.port) + except Exception as err: + msg = ("Failed to configure log forwarding for service at %s. " + "Error: %s" % (mgmt_ip, err)) + LOG.error(msg) + else: + if result_log_forward == common_const.UNHANDLED: + pass + elif result_log_forward != common_const.STATUS_SUCCESS: + # Failure in log forward configuration won't break chain + # creation. However, error will be logged for detecting + # failure. + msg = ("Failed to configure log forwarding for service at %s. " + "Error: %s" % (mgmt_ip, result_log_forward)) + LOG.error(msg) + + try: + result_static_ips = self._configure_static_ips(resource_data) + except Exception as err: + msg = ("Failed to add static IPs. Error: %s" % err) + LOG.error(msg) + return msg + else: + if result_static_ips != common_const.STATUS_SUCCESS: + return result_static_ips + + rule_info = dict( + provider_mac=resource_data['provider_mac'], + stitching_mac=resource_data['stitching_mac']) + + url = const.request_url % (mgmt_ip, + self.port, 'add_rule') + data = jsonutils.dumps(rule_info) + msg = ("Initiating POST request to add persistent rule to primary " + "service at: %r" % mgmt_ip) + LOG.info(msg) + + err_msg = ("Add persistent rule POST request to the VyOS firewall " + "service at %s failed. 
" % url) + try: + resp = self.rest_api.fire(url, data, common_const.POST) + except Exception as err: + err_msg += ("Reason: %r" % str(err).capitalize()) + LOG.error(err_msg) + return err_msg + + if resp is common_const.STATUS_SUCCESS: + msg = ("Persistent rule successfully added for " + "service at %r." % url) + LOG.info(msg) + return resp + + err_msg += (("Status code: %r" % resp['status']) + if type(resp) is dict + else ("Reason: " + resp)) + LOG.error(err_msg) + return err_msg + + def _clear_static_ips(self, resource_data): + """ Clear static IPs for provider and stitching + interfaces of the service VM. + + Issues REST call to service VM for deletion of static IPs. + + :param resource_data: a dictionary of firewall rules and objects + send by neutron plugin + + Returns: SUCCESS/Failure message with reason. + + """ + + static_ips_info = dict( + provider_ip=resource_data.get('provider_ip'), + provider_cidr=resource_data.get('provider_cidr'), + provider_mac=resource_data.get('provider_mac'), + stitching_ip=resource_data.get('stitching_ip'), + stitching_cidr=resource_data.get('stitching_cidr'), + stitching_mac=resource_data.get('stitching_mac')) + mgmt_ip = resource_data['mgmt_ip'] + + url = const.request_url % (mgmt_ip, + self.port, + 'del_static_ip') + data = jsonutils.dumps(static_ips_info) + + msg = ("Initiating POST request to remove static IPs for primary " + "service at: %r" % mgmt_ip) + LOG.info(msg) + + err_msg = ("Static IP DELETE request to the VyOS firewall " + "service at %s failed. " % url) + try: + resp = self.rest_api.fire(url, data, common_const.DELETE) + except Exception as err: + err_msg += ("Reason: %r" % str(err).capitalize()) + LOG.error(err_msg) + return err_msg + + if resp is common_const.STATUS_SUCCESS: + msg = ("Static IPs successfully removed for service at %r." 
% url) + LOG.info(msg) + return resp + + err_msg += (("Status code: %r, Reason: %r" % + (resp['status'], resp['reason'])) + if type(resp) is dict + else ("Reason: " + resp)) + LOG.error(err_msg) + return err_msg + + def clear_interfaces(self, context, resource_data): + """ Clear interfaces for the service VM. + + Calls static IP clear function and implements + persistent rule deletion in the service VM. + Issues REST call to service VM for deletion of interfaces. + + :param context: neutron context + :param resource_data: a dictionary of firewall rules and objects + send by neutron plugin + + Returns: SUCCESS/Failure message with reason. + + """ + + try: + result_static_ips = self._clear_static_ips(resource_data) + except Exception as err: + msg = ("Failed to remove static IPs. Error: %s" % err) + LOG.error(msg) + return msg + else: + if result_static_ips != common_const.STATUS_SUCCESS: + return result_static_ips + else: + msg = ("Successfully removed static IPs. " + "Result: %s" % result_static_ips) + LOG.info(msg) + + rule_info = dict( + provider_mac=resource_data['provider_mac'], + stitching_mac=resource_data['stitching_mac']) + + mgmt_ip = resource_data['mgmt_ip'] + + msg = ("Initiating DELETE persistent rule.") + LOG.info(msg) + url = const.request_url % (mgmt_ip, self.port, 'delete_rule') + data = jsonutils.dumps(rule_info) + + err_msg = ("Persistent rule DELETE request to the VyOS firewall " + "service at %s failed. " % url) + try: + resp = self.rest_api.fire(url, data, common_const.DELETE) + except Exception as err: + err_msg += ("Reason: %r" % str(err).capitalize()) + LOG.error(err_msg) + return err_msg + + if resp is common_const.STATUS_SUCCESS: + msg = ("Persistent rules successfully deleted " + "for service at %r." % url) + LOG.info(msg) + return resp + + err_msg += (("Status code: %r." 
% resp['status']) + if type(resp) is dict + else ("Reason: " + resp)) + LOG.error(err_msg) + return err_msg + + def configure_routes(self, context, resource_data): + """ Configure routes for the service VM. + + Issues REST call to service VM for configuration of routes. + + :param context: neutron context + :param resource_data: a dictionary of firewall rules and objects + send by neutron plugin + + Returns: SUCCESS/Failure message with reason. + + """ + + mgmt_ip = resource_data.get('mgmt_ip') + source_cidrs = resource_data.get('source_cidrs') + gateway_ip = resource_data.get('gateway_ip') + + url = const.request_url % (mgmt_ip, self.port, + 'add-source-route') + route_info = [] + for source_cidr in source_cidrs: + route_info.append({'source_cidr': source_cidr, + 'gateway_ip': gateway_ip}) + data = jsonutils.dumps(route_info) + msg = ("Initiating POST request to configure route of " + "primary service at: %r" % mgmt_ip) + LOG.info(msg) + + err_msg = ("Configure routes POST request to the VyOS firewall " + "service at %s failed. " % url) + try: + resp = self.rest_api.fire(url, data, common_const.POST) + except Exception as err: + err_msg += ("Reason: %r" % str(err).capitalize()) + LOG.error(err_msg) + return err_msg + + if resp is common_const.STATUS_SUCCESS: + msg = ("Configured routes successfully for service at %r." % url) + LOG.info(msg) + return resp + + err_msg += (("Status code: %r, Reason: %r" % + (resp['status'], resp['reason'])) + if type(resp) is dict + else ("Reason: " + resp)) + LOG.error(err_msg) + return err_msg + + def clear_routes(self, context, resource_data): + """ Clear routes for the service VM. + + Issues REST call to service VM for deletion of routes. + + :param context: neutron context + :param resource_data: a dictionary of firewall rules and objects + send by neutron plugin + + Returns: SUCCESS/Failure message with reason. 
+ + """ + + mgmt_ip = resource_data.get('mgmt_ip') + source_cidrs = resource_data.get('source_cidrs') + + url = const.request_url % (mgmt_ip, self.port, + 'delete-source-route') + route_info = [] + for source_cidr in source_cidrs: + route_info.append({'source_cidr': source_cidr}) + data = jsonutils.dumps(route_info) + msg = ("Initiating DELETE route request to primary service at: %r" + % mgmt_ip) + LOG.info(msg) + + err_msg = ("Routes DELETE request to the VyOS firewall " + "service at %s failed. " % url) + try: + resp = self.rest_api.fire(url, data, common_const.DELETE) + except Exception as err: + err_msg += ("Reason: %r" % str(err).capitalize()) + LOG.error(err_msg) + return err_msg + + if resp is common_const.STATUS_SUCCESS: + msg = ("Routes successfully removed for service at %r." % url) + LOG.info(msg) + return resp + + err_msg += (("Status code: %r, Reason: %r" % + (resp['status'], resp['reason'])) + if type(resp) is dict + else ("Reason: " + resp)) + LOG.error(err_msg) + return err_msg + + +class FwaasDriver(FwGenericConfigDriver): + """ Firewall as a service driver for handling firewall + service configuration requests. + + We initialize service type in this class because agent loads + class object only for those driver classes that have service type + initialized. Also, only this driver class is exposed to the agent. 
+ + """ + + service_type = fw_const.SERVICE_TYPE + service_vendor = const.VYOS + + def __init__(self, conf): + self.conf = conf + self.timeout = const.REST_TIMEOUT + self.rest_api = RestApi(self.timeout) + self.host = self.conf.host + self.port = const.CONFIGURATION_SERVER_PORT + super(FwaasDriver, self).__init__() + + def _get_firewall_attribute(self, firewall): + """ Retrieves management IP from the firewall resource received + + :param firewall: firewall dictionary containing rules + and other objects + + Returns: management IP + + """ + + description = ast.literal_eval(firewall["description"]) + if not description.get('vm_management_ip'): + msg = ("Failed to find vm_management_ip.") + LOG.debug(msg) + raise + + if not description.get('service_vendor'): + msg = ("Failed to find service_vendor.") + LOG.debug(msg) + raise + + msg = ("Found vm_management_ip %s." + % description['vm_management_ip']) + LOG.debug(msg) + return description['vm_management_ip'] + + def create_firewall(self, context, firewall, host): + """ Implements firewall creation + + Issues REST call to service VM for firewall creation + + :param context: Neutron context + :param firewall: Firewall resource object from neutron fwaas plugin + :param host: Name of the host machine + + Returns: SUCCESS/Failure message with reason. + + """ + + msg = ("Processing create firewall request in FWaaS Driver " + "for Firewall ID: %s." % firewall['id']) + LOG.debug(msg) + mgmt_ip = self._get_firewall_attribute(firewall) + url = const.request_url % (mgmt_ip, + self.port, + 'configure-firewall-rule') + msg = ("Initiating POST request for FIREWALL ID: %r Tenant ID:" + " %r. URL: %s" % (firewall['id'], firewall['tenant_id'], url)) + LOG.info(msg) + data = jsonutils.dumps(firewall) + + err_msg = ("Configure firewall POST request to the VyOS " + "service at %s failed. 
" % url) + try: + resp = self.rest_api.fire(url, data, common_const.POST) + except Exception as err: + err_msg += ("Reason: %r" % str(err).capitalize()) + LOG.error(err_msg) + return common_const.STATUS_ERROR + + if resp is common_const.STATUS_SUCCESS: + msg = ("Configured firewall successfully for service at %r." % url) + LOG.info(msg) + return common_const.STATUS_ACTIVE + + err_msg += (("Reason: %r, Response Content: %r" % + (resp.pop('message'), resp)) + if type(resp) is dict + else ("Reason: " + resp)) + LOG.error(err_msg) + return common_const.STATUS_ERROR + + def update_firewall(self, context, firewall, host): + """ Implements firewall updation + + Issues REST call to service VM for firewall updation + + :param context: Neutron context + :param firewall: Firewall resource object from neutron fwaas plugin + :param host: Name of the host machine + + Returns: SUCCESS/Failure message with reason. + + """ + + mgmt_ip = self._get_firewall_attribute(firewall) + url = const.request_url % (mgmt_ip, + self.port, + 'update-firewall-rule') + msg = ("Initiating UPDATE request. URL: %s" % url) + LOG.info(msg) + data = jsonutils.dumps(firewall) + + err_msg = ("Update firewall POST request to the VyOS " + "service at %s failed. " % url) + try: + resp = self.rest_api.fire(url, data, common_const.PUT) + except Exception as err: + err_msg += ("Reason: %r" % str(err).capitalize()) + LOG.error(err_msg) + return common_const.STATUS_ERROR + + if resp is common_const.STATUS_SUCCESS: + msg = ("Updated firewall successfully for service at %r." 
% url) + LOG.info(msg) + return common_const.STATUS_ACTIVE + + err_msg += (("Reason: %r, Response Content: %r" % + (resp.pop('message'), resp)) + if type(resp) is dict + else ("Reason: " + resp)) + LOG.error(err_msg) + return common_const.STATUS_ERROR + + def delete_firewall(self, context, firewall, host): + """ Implements firewall deletion + + Issues REST call to service VM for firewall deletion + + :param context: Neutron context + :param firewall: Firewall resource object from neutron fwaas plugin + :param host: Name of the host machine + + Returns: SUCCESS/Failure message with reason. + + """ + + mgmt_ip = self._get_firewall_attribute(firewall) + url = const.request_url % (mgmt_ip, + self.port, + 'delete-firewall-rule') + msg = ("Initiating DELETE request. URL: %s" % url) + LOG.info(msg) + data = jsonutils.dumps(firewall) + + err_msg = ("Delete firewall POST request to the VyOS " + "service at %s failed. " % url) + try: + resp = self.rest_api.fire(url, data, common_const.DELETE) + except Exception as err: + err_msg += ("Reason: %r" % str(err).capitalize()) + LOG.error(err_msg) + return common_const.STATUS_ERROR + + if resp is common_const.STATUS_SUCCESS: + msg = ("Deleted firewall successfully for service at %r." % url) + LOG.info(msg) + return common_const.STATUS_DELETED + + if type(resp) is dict: + if not resp.get('delete_success') and ( + resp.get('message') == const.INTERFACE_NOT_FOUND): + err_msg += ("Firewall was not deleted as interface was not " + "available in the firewall. It might have got " + "detached. So marking this delete as SUCCESS. 
" + "URL: %r, Response Content: %r" % + (url, resp.content)) + LOG.error(err_msg) + return common_const.STATUS_SUCCESS + else: + err_msg += ("Response Content: %r" % resp) + else: + err_msg += ("Reason: " + resp) + LOG.error(err_msg) + return common_const.STATUS_ERROR diff --git a/gbpservice/nfp/base_configurator/api/__init__.py b/gbpservice/contrib/nfp/configurator/drivers/loadbalancer/__init__.py similarity index 100% rename from gbpservice/nfp/base_configurator/api/__init__.py rename to gbpservice/contrib/nfp/configurator/drivers/loadbalancer/__init__.py diff --git a/gbpservice/nfp/base_configurator/api/v1/__init__.py b/gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v1/__init__.py similarity index 100% rename from gbpservice/nfp/base_configurator/api/v1/__init__.py rename to gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v1/__init__.py diff --git a/gbpservice/nfp/config_orchestrator/__init__.py b/gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v1/haproxy/__init__.py similarity index 100% rename from gbpservice/nfp/config_orchestrator/__init__.py rename to gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v1/haproxy/__init__.py diff --git a/gbpservice/nfp/configurator/drivers/loadbalancer/v1/haproxy/haproxy_lb_driver.py b/gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v1/haproxy/haproxy_lb_driver.py similarity index 83% rename from gbpservice/nfp/configurator/drivers/loadbalancer/v1/haproxy/haproxy_lb_driver.py rename to gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v1/haproxy/haproxy_lb_driver.py index e20440f41f..1cc45701bf 100644 --- a/gbpservice/nfp/configurator/drivers/loadbalancer/v1/haproxy/haproxy_lb_driver.py +++ b/gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v1/haproxy/haproxy_lb_driver.py @@ -12,57 +12,34 @@ import ast -from gbpservice.nfp.configurator.drivers.base import base_driver -from gbpservice.nfp.configurator.drivers.loadbalancer.v1.haproxy import ( - haproxy_rest_client) 
-from gbpservice.nfp.configurator.lib import constants as common_const -from gbpservice.nfp.configurator.lib import lb_constants +from gbpservice.contrib.nfp.configurator.drivers.base import base_driver +from gbpservice.contrib.nfp.configurator.drivers.loadbalancer.v1.\ + haproxy import (haproxy_rest_client) +from gbpservice.contrib.nfp.configurator.lib import constants as common_const +from gbpservice.contrib.nfp.configurator.lib import lb_constants from gbpservice.nfp.core import log as nfp_logging LOG = nfp_logging.getLogger(__name__) DRIVER_NAME = 'loadbalancer' -PROTOCOL_MAP = { - lb_constants.PROTOCOL_TCP: 'tcp', - lb_constants.PROTOCOL_HTTP: 'http', - lb_constants.PROTOCOL_HTTPS: 'https', -} -BALANCE_MAP = { - lb_constants.LB_METHOD_ROUND_ROBIN: 'roundrobin', - lb_constants.LB_METHOD_LEAST_CONNECTIONS: 'leastconn', - lb_constants.LB_METHOD_SOURCE_IP: 'source' -} -REQUEST_RETRIES = 0 -REQUEST_TIMEOUT = 120 - - -""" Loadbalancer generic configuration driver for handling device -configuration requests. - -""" class LbGenericConfigDriver(object): + """ Loadbalancer generic configuration driver class for handling device + configuration requests. """ - Driver class for implementing loadbalancer configuration - requests from Orchestrator. - """ - def __init__(self): pass def configure_interfaces(self, context, resource_data): """ Configure interfaces for the service VM. - Calls static IP configuration function and implements - persistent rule addition in the service VM. - Issues REST call to service VM for configuration of interfaces. - + Internally it configures log forwarding in service vm :param context: neutron context - :param resource_data: a dictionary of loadbalancer objects - send by neutron plugin + :param resource_data: resource data containing service vm + related details - Returns: SUCCESS/Failure message with reason. + Returns: SUCCESS/FAILED with reason. 
""" @@ -76,7 +53,6 @@ def configure_interfaces(self, context, resource_data): msg = ("Failed to configure log forwarding for service at %s. " "Error: %s" % (mgmt_ip, err)) LOG.error(msg) - return msg else: if result_log_forward == common_const.UNHANDLED: pass @@ -87,7 +63,6 @@ def configure_interfaces(self, context, resource_data): # Failure in log forward configuration won't break chain # creation. However, error will be logged for detecting # failure. - # return result_log_forward else: msg = ("Configured log forwarding for service at %s. " "Result: %s" % (mgmt_ip, result_log_forward)) @@ -97,6 +72,11 @@ def configure_interfaces(self, context, resource_data): class HaproxyOnVmDriver(LbGenericConfigDriver, base_driver.BaseDriver): + """Main driver which gets registered with LB agent and Generic Config agent + in configurator and these agents pass all *aaS neutron and generic + config requests to this class. + """ + service_type = 'loadbalancer' service_vendor = 'haproxy' pool_to_device = {} @@ -111,7 +91,8 @@ def __init__(self, plugin_rpc=None, conf=None): def _get_rest_client(self, ip_addr): client = haproxy_rest_client.HttpRequests( ip_addr, self.port, - REQUEST_RETRIES, REQUEST_TIMEOUT) + lb_constants.REQUEST_RETRIES, + lb_constants.REQUEST_TIMEOUT) return client def _get_device_for_pool(self, pool_id, context): @@ -155,7 +136,6 @@ def _expand_expected_codes(self, codes): return retval def _prepare_haproxy_frontend(self, vip): - # Prepare the frontend request body vip_ip = vip['address'] vip_port_number = vip['protocol_port'] protocol = vip['protocol'] @@ -163,7 +143,7 @@ def _prepare_haproxy_frontend(self, vip): frontend = { 'option': {}, 'bind': '%s:%d' % (vip_ip, vip_port_number), - 'mode': PROTOCOL_MAP[protocol], + 'mode': lb_constants.PROTOCOL_MAP[protocol], 'default_backend': "bck:%s" % vip['pool_id'] } if vip['connection_limit'] >= 0: @@ -193,8 +173,9 @@ def _prepare_haproxy_backend(self, pool, context): server_addon = '' backend = { - 'mode': '%s' % 
PROTOCOL_MAP[protocol], - 'balance': '%s' % BALANCE_MAP.get(lb_method, 'roundrobin'), + 'mode': '%s' % lb_constants.PROTOCOL_MAP[protocol], + 'balance': '%s' % lb_constants.BALANCE_MAP.get( + lb_method, 'roundrobin'), 'option': {}, 'timeout': {}, 'server': {} @@ -391,115 +372,79 @@ def _prepare_backend_updating_health_monitor_for_pool(self, health_monitor, return backend def _create_vip(self, vip, device_addr): - # create REST client object try: client = self._get_rest_client(device_addr) - - # Prepare the frontend request body frontend = self._prepare_haproxy_frontend(vip) - body = {"frnt:%s" % vip['id']: frontend} - - # Send REST API request to Haproxy agent on VM client.create_resource("frontend", body) except Exception as e: raise e def _delete_vip(self, vip, device_addr): - # create REST client object try: client = self._get_rest_client(device_addr) - - # Send REST API request to Haproxy agent on VM client.delete_resource("frontend/frnt:%s" % vip['id']) except Exception as e: raise e def _create_pool(self, pool, device_addr, context): - # create REST client object try: client = self._get_rest_client(device_addr) - - # Prepare the backend request body backend = self._prepare_haproxy_backend(pool, context) body = {'bck:%s' % pool['id']: backend} - - # Send REST API request to Haproxy agent on VM client.create_resource("backend", body) except Exception as e: raise e def _delete_pool(self, pool, device_addr): - # create REST client object try: client = self._get_rest_client(device_addr) - - # Send REST API request to Haproxy agent on VM client.delete_resource("backend/bck:%s" % pool['id']) except Exception as e: raise e def _create_member(self, member, device_addr, context): - # create REST client object try: client = self._get_rest_client(device_addr) - - # get backend backend = client.get_resource("backend/bck:%s" % member['pool_id']) - backend = self._prepare_haproxy_backend_with_member( member, backend, context) - - # Send REST API request to Haproxy agent on 
VM client.update_resource("backend/bck:%s" % member['pool_id'], backend) except Exception as e: raise e def _delete_member(self, member, device_addr): - # create REST client object try: client = self._get_rest_client(device_addr) - - # get backend backend = client.get_resource("backend/bck:%s" % member['pool_id']) # update backend with the server deleted from that del backend['server']['srvr:%s' % member['id']] - - # Send REST API request to Haproxy agent on VM client.update_resource("backend/bck:%s" % member['pool_id'], backend) except Exception as e: raise e def _create_pool_health_monitor(self, hm, pool_id, device_addr): - # create REST client object try: client = self._get_rest_client(device_addr) - backend = client.get_resource("backend/bck:%s" % pool_id) - - # server addon options backend = self._prepare_backend_adding_health_monitor_to_pool( hm, pool_id, backend) - client.update_resource("backend/bck:%s" % pool_id, backend) except Exception as e: raise e def _delete_pool_health_monitor(self, hm, pool_id, device_addr, context): - # create REST client object try: client = self._get_rest_client(device_addr) - backend = client.get_resource("backend/bck:%s" % pool_id) - backend = self._prepare_backend_deleting_health_monitor_from_pool( hm, pool_id, @@ -513,53 +458,6 @@ def _delete_pool_health_monitor(self, hm, pool_id, def get_name(self): return DRIVER_NAME - def deploy_instance(self, logical_config): - # do actual deploy only if vip and pool are configured and active - if (not logical_config or - 'vip' not in logical_config or - (logical_config['vip']['status'] not in - lb_constants.ACTIVE_PENDING_STATUSES) or - not logical_config['vip']['admin_state_up'] or - (logical_config['pool']['status'] not in - lb_constants.ACTIVE_PENDING_STATUSES) or - not logical_config['pool']['admin_state_up']): - return - - try: - device_addr = self._get_device_for_pool( - logical_config['pool']['id']) - - self._create_pool(logical_config['pool'], device_addr) - 
self._create_vip(logical_config['vip'], device_addr) - for member in logical_config['members']: - self._create_member(member, device_addr) - for hm in logical_config['healthmonitors']: - self._create_pool_health_monitor(hm, - logical_config['pool']['id'], - device_addr) - except Exception as e: - msg = ("Failed to deploy instance. %s" - % str(e).capitalize()) - LOG.error(msg) - raise e - - def undeploy_instance(self, pool_id, context): - try: - device_addr = self._get_device_for_pool(pool_id, context) - logical_device = self.plugin_rpc.get_logical_device(pool_id, - context) - - self._delete_vip(logical_device['vip'], device_addr) - self._delete_pool(logical_device['pool'], device_addr) - except Exception as e: - msg = ("Failed to undeploy instance. %s" - % str(e).capitalize()) - LOG.error(msg) - raise e - - def remove_orphans(self, pol_ids): - raise NotImplementedError - def get_stats(self, pool_id): stats = {} try: @@ -571,8 +469,8 @@ def get_stats(self, pool_id): device_addr = self._get_device_for_pool(pool_id, None) # create REST client object - client = self._get_rest_client(device_addr) + client = self._get_rest_client(device_addr) stats = client.get_resource('stats/%s' % pool_id) for key, value in stats.get('members', {}).items(): @@ -589,7 +487,7 @@ def get_stats(self, pool_id): return stats def create_vip(self, vip, context): - msg = (" create vip [vip=%s ]" % (vip)) + msg = ("Handling create vip [vip=%s]" % (vip)) LOG.info(msg) try: device_addr = self._get_device_for_pool(vip['pool_id'], context) @@ -614,7 +512,7 @@ def create_vip(self, vip, context): LOG.info(msg) def update_vip(self, old_vip, vip, context): - msg = (" update vip [old_vip=%s, vip=%s ]" % (old_vip, vip)) + msg = ("Handling update vip [old_vip=%s, vip=%s]" % (old_vip, vip)) LOG.info(msg) try: device_addr = self._get_device_for_pool(old_vip['pool_id'], @@ -644,13 +542,8 @@ def update_vip(self, old_vip, vip, context): self._create_vip(vip, device_addr) return - # create REST client object 
client = self._get_rest_client(device_addr) - - # Prepare the frontend request body body = self._prepare_haproxy_frontend(vip) - - # Send REST API request to Haproxy agent on VM client.update_resource("frontend/frnt:%s" % vip['id'], body) except Exception as e: msg = ("Failed to update vip %s. %s" @@ -662,17 +555,13 @@ def update_vip(self, old_vip, vip, context): LOG.info(msg) def delete_vip(self, vip, context): - msg = (" delete vip [vip=%s ]" % (vip)) + msg = ("Handling delete vip [vip=%s]" % (vip)) LOG.info(msg) try: device_addr = self._get_device_for_pool(vip['pool_id'], context) logical_device = self.plugin_rpc.get_logical_device(vip['pool_id'], context) - - # Delete vip from VM self._delete_vip(vip, device_addr) - - # Delete pool from VM pool = logical_device['pool'] self._delete_pool(pool, device_addr) except Exception as e: @@ -686,24 +575,20 @@ def delete_vip(self, vip, context): def create_pool(self, pool, context): # nothing to do here because a pool needs a vip to be useful - msg = ("create pool [pool=%s]" % (pool)) + msg = ("Handled create pool [pool=%s]" % (pool)) LOG.info(msg) - pass def update_pool(self, old_pool, pool, context): - msg = ("update pool [old_pool=%s, pool=%s]" % (old_pool, pool)) + msg = ("Handling update pool [old_pool=%s, pool=%s]" + % (old_pool, pool)) LOG.info(msg) try: device_addr = self._get_device_for_pool(pool['id'], context) if (pool['vip_id'] and device_addr is not None): - # create REST client object client = self._get_rest_client(device_addr) - # Prepare the backend request body for create request backend = self._prepare_haproxy_backend(pool, context) body = backend - - # Send REST API request to Haproxy agent on VM client.update_resource("backend/bck:%s" % pool['id'], body) except Exception as e: msg = ("Failed to update pool from %s to %s. 
%s" @@ -716,11 +601,11 @@ def update_pool(self, old_pool, pool, context): LOG.info(msg) def delete_pool(self, pool, context): - # if pool is not known, do nothing - msg = ("delete pool [pool=%s]" % (pool)) + msg = ("Handling delete pool [pool=%s]" % (pool)) LOG.info(msg) try: device = HaproxyOnVmDriver.pool_to_device.get(pool['id'], None) + # if pool is not known, do nothing if device is None: return @@ -738,7 +623,7 @@ def delete_pool(self, pool, context): LOG.info(msg) def create_member(self, member, context): - msg = (" create member [member=%s] " % (member)) + msg = ("Handling create member [member=%s] " % (member)) LOG.info(msg) try: device_addr = self._get_device_for_pool(member['pool_id'], context) @@ -754,8 +639,8 @@ def create_member(self, member, context): LOG.info(msg) def update_member(self, old_member, member, context): - msg = (" update member [old_member=%s, member=%s] " % (old_member, - member)) + msg = ("Handling update member [old_member=%s, member=%s] " + % (old_member, member)) LOG.info(msg) try: device_addr = self._get_device_for_pool(old_member['pool_id'], @@ -763,7 +648,6 @@ def update_member(self, old_member, member, context): if device_addr is not None: self._delete_member(old_member, device_addr) - # create the member (new) device_addr = self._get_device_for_pool(member['pool_id'], context) if device_addr is not None: self._create_member(member, device_addr, context) @@ -777,7 +661,7 @@ def update_member(self, old_member, member, context): LOG.info(msg) def delete_member(self, member, context): - msg = (" delete member [member=%s] " % (member)) + msg = ("Handling delete member [member=%s] " % (member)) LOG.info(msg) try: device_addr = self._get_device_for_pool(member['pool_id'], @@ -794,8 +678,7 @@ def delete_member(self, member, context): LOG.info(msg) def create_pool_health_monitor(self, health_monitor, pool_id, context): - # create the health_monitor - msg = ("create pool health monitor [hm=%s, pool_id=%s]" + msg = ("Handling create 
pool health monitor [hm=%s, pool_id=%s]" % (health_monitor, pool_id)) LOG.info(msg) try: @@ -816,15 +699,13 @@ def create_pool_health_monitor(self, health_monitor, pool_id, context): def update_pool_health_monitor(self, old_health_monitor, health_monitor, pool_id, context): - msg = ("update pool health monitor [old_hm=%s, hm=%s, pool_id=%s]" - % (old_health_monitor, health_monitor, pool_id)) + msg = ("Handling update pool health monitor [old_hm=%s, hm=%s," + "pool_id=%s]" % (old_health_monitor, health_monitor, pool_id)) LOG.info(msg) try: device_addr = self._get_device_for_pool(pool_id, context) if device_addr is not None: - # create REST client object client = self._get_rest_client(device_addr) - backend = client.get_resource("backend/bck:%s" % pool_id) # update backend deleting the health monitor from it @@ -849,7 +730,7 @@ def update_pool_health_monitor(self, old_health_monitor, health_monitor, LOG.info(msg) def delete_pool_health_monitor(self, health_monitor, pool_id, context): - msg = ("delete pool health monitor [hm=%s, pool_id=%s]" + msg = ("Handling delete pool health monitor [hm=%s, pool_id=%s]" % (health_monitor, pool_id)) LOG.info(msg) try: diff --git a/gbpservice/nfp/configurator/drivers/loadbalancer/v1/haproxy/haproxy_rest_client.py b/gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v1/haproxy/haproxy_rest_client.py similarity index 50% rename from gbpservice/nfp/configurator/drivers/loadbalancer/v1/haproxy/haproxy_rest_client.py rename to gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v1/haproxy/haproxy_rest_client.py index 24c2800817..c32fa277e1 100644 --- a/gbpservice/nfp/configurator/drivers/loadbalancer/v1/haproxy/haproxy_rest_client.py +++ b/gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v1/haproxy/haproxy_rest_client.py @@ -10,7 +10,6 @@ # License for the specific language governing permissions and limitations # under the License. 
-import httplib import json as jsonutils import requests import urlparse @@ -20,30 +19,9 @@ LOG = nfp_logging.getLogger(__name__) -class RestClientException(Exception): - """Embeds the exceptions thrown by the REST Client.""" - - def __init__(self, status, method, url): - """RestClientException init - - :param status: HTTP Response code - :param method: HTTP Request Method - :param url: REST Server request url - - """ - msg = ("REST Request failed for URL: %s, Method: " - "%s and Response Code: %s" % (url, method, status)) - LOG.error(msg) - super(RestClientException, self).__init__(self, msg) - self.status = status - self.method = method - self.url = url - - class HttpRequests(object): - """Encapsulates the Python requests module - - Uses python-requests library to perform API request to the REST server + """Encapsulates Python requests module + Uses python-requests library to perform API request to the REST server """ def __init__(self, host, port, retries=0, request_timeout=30): @@ -62,47 +40,26 @@ def do_request(self, method, url=None, headers=None, data=None, response = self.pool.request(method, url=url, headers=headers, data=data, timeout=timeout) - except Exception as err: - msg = ("Failed in performing HTTP request. %s" - % str(err).capitalize()) + except Exception as e: + msg = ("[Request:%s, URL:%s, Body:%s] Failed.Reason:%s" + % (method, url, data, e)) LOG.error(msg) + raise Exception(msg) return response def request(self, method, uri, body=None, content_type="application/json"): - """Issue a request to REST API server.""" - headers = {"Content-Type": content_type} url = urlparse.urljoin(self.rest_server_url, uri) - response = None - - try: - response = self.do_request(method, url=url, headers=headers, - data=body, - timeout=self._request_timeout) - - msg = ("Request: %s, URI: %s executed." 
- % (method, (self.rest_server_url + uri))) - LOG.debug(msg) - except httplib.IncompleteRead as err: - response = err.partial - msg = ("Request failed in REST Api Server. %s" - % str(err).capitalize()) - LOG.error(msg) - except Exception as err: - msg = ("Request failed in REST Api Server. %s" - % str(err).capitalize()) - LOG.error(msg) - + response = self.do_request(method, url=url, headers=headers, + data=body, + timeout=self._request_timeout) if response is None: - # Request was timed out. - msg = ("Response is Null, Request for method: %s to " - "URI: %s timed out" % (method, uri)) + msg = ("[Request:%s, URL:%s, Body:%s] Failed.HTTP response is None" + ".Request timed out" % (method, url, body)) LOG.error(msg) - # TODO(Magesh): Use constants defined in requests or httplib - # for checking error codes - raise RestClientException(status=408, method=method, url=url) + raise Exception(msg) status = response.status_code # Not Found (404) is OK for DELETE. Ignore it here @@ -111,15 +68,13 @@ def request(self, method, uri, body=None, elif status not in (200, 201, 204): # requests.codes.ok = 200, requests.codes.created = 201, # requests.codes.no_content = 204 - msg = ("Unexpected response code %s from REST " - "API Server for %s to %s" - % (status, method, url)) + msg = ("[Request:%s, URL:%s, Body:%s] Failed with status:%s" + % (method, url, body, status)) LOG.error(msg) - raise RestClientException(status=status, method=method, - url=self.rest_server_url + uri) + raise Exception(msg) else: - msg = ("Success: %s, url: %s and status: %s" - % (method, (self.rest_server_url + uri), status)) + msg = ("[Request:%s, URL:%s, Body:%s] executed successfully" + % (method, url, body)) LOG.debug(msg) response.body = response.content return response @@ -140,12 +95,3 @@ def delete_resource(self, resource_path): def get_resource(self, resource_path): response = self.request("GET", resource_path) return response.json() - - def list_resources(self, resource_path): - response = 
self.request("GET", resource_path) - return response.json() - - def sync_config(self, resource_path, resource_data): - response = self.request("POST", resource_path, - jsonutils.dumps(resource_data)) - return response.json() diff --git a/gbpservice/nfp/config_orchestrator/common/__init__.py b/gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/__init__.py similarity index 100% rename from gbpservice/nfp/config_orchestrator/common/__init__.py rename to gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/__init__.py diff --git a/gbpservice/nfp/config_orchestrator/handlers/__init__.py b/gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/__init__.py similarity index 100% rename from gbpservice/nfp/config_orchestrator/handlers/__init__.py rename to gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/__init__.py diff --git a/gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/haproxy_driver.py b/gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/haproxy_driver.py similarity index 97% rename from gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/haproxy_driver.py rename to gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/haproxy_driver.py index 47cd357c61..5d539a3055 100644 --- a/gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/haproxy_driver.py +++ b/gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/haproxy_driver.py @@ -17,20 +17,20 @@ from neutron_lbaas.drivers import driver_base as n_driver_base from gbpservice.nfp.common import exceptions -from gbpservice.nfp.configurator.drivers.base import base_driver -from gbpservice.nfp.configurator.drivers.loadbalancer.\ +from gbpservice.contrib.nfp.configurator.drivers.base import base_driver +from gbpservice.contrib.nfp.configurator.drivers.loadbalancer.\ v2.haproxy import neutron_lbaas_data_models as n_data_models -from gbpservice.nfp.configurator.drivers.loadbalancer.v2.haproxy.octavia_lib.\ 
+from gbpservice.contrib.nfp.configurator.drivers.loadbalancer.v2.haproxy.octavia_lib.\ common import data_models as o_data_models -from gbpservice.nfp.configurator.drivers.loadbalancer.v2.haproxy.octavia_lib.\ +from gbpservice.contrib.nfp.configurator.drivers.loadbalancer.v2.haproxy.octavia_lib.\ common import constants -from gbpservice.nfp.configurator.drivers.loadbalancer.v2.haproxy.octavia_lib.\ +from gbpservice.contrib.nfp.configurator.drivers.loadbalancer.v2.haproxy.octavia_lib.\ network import data_models as network_data_models -from gbpservice.nfp.configurator.drivers.loadbalancer.v2.haproxy.\ +from gbpservice.contrib.nfp.configurator.drivers.loadbalancer.v2.haproxy.\ rest_api_driver import HaproxyAmphoraLoadBalancerDriver -from gbpservice.nfp.configurator.lib import constants as common_const -from gbpservice.nfp.configurator.lib import lb_constants -from gbpservice.nfp.configurator.lib import lbv2_constants +from gbpservice.contrib.nfp.configurator.lib import constants as common_const +from gbpservice.contrib.nfp.configurator.lib import lb_constants +from gbpservice.contrib.nfp.configurator.lib import lbv2_constants from gbpservice.nfp.core import log as nfp_logging DRIVER_NAME = 'loadbalancerv2' diff --git a/gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/neutron_lbaas_data_models.py b/gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/neutron_lbaas_data_models.py similarity index 100% rename from gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/neutron_lbaas_data_models.py rename to gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/neutron_lbaas_data_models.py diff --git a/gbpservice/nfp/config_orchestrator/handlers/config/__init__.py b/gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/__init__.py similarity index 100% rename from gbpservice/nfp/config_orchestrator/handlers/config/__init__.py rename to 
gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/__init__.py diff --git a/gbpservice/nfp/config_orchestrator/handlers/event/__init__.py b/gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/amphorae/__init__.py similarity index 100% rename from gbpservice/nfp/config_orchestrator/handlers/event/__init__.py rename to gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/amphorae/__init__.py diff --git a/gbpservice/nfp/config_orchestrator/handlers/notification/__init__.py b/gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/amphorae/driver_exceptions/__init__.py similarity index 100% rename from gbpservice/nfp/config_orchestrator/handlers/notification/__init__.py rename to gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/amphorae/driver_exceptions/__init__.py diff --git a/gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/amphorae/driver_exceptions/exceptions.py b/gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/amphorae/driver_exceptions/exceptions.py similarity index 100% rename from gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/amphorae/driver_exceptions/exceptions.py rename to gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/amphorae/driver_exceptions/exceptions.py diff --git a/gbpservice/nfp/config_orchestrator/modules/__init__.py b/gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/amphorae/drivers/__init__.py similarity index 100% rename from gbpservice/nfp/config_orchestrator/modules/__init__.py rename to gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/amphorae/drivers/__init__.py diff --git a/gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/amphorae/drivers/driver_base.py 
b/gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/amphorae/drivers/driver_base.py similarity index 100% rename from gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/amphorae/drivers/driver_base.py rename to gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/amphorae/drivers/driver_base.py diff --git a/gbpservice/nfp/configurator/__init__.py b/gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/amphorae/drivers/haproxy/__init__.py similarity index 100% rename from gbpservice/nfp/configurator/__init__.py rename to gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/amphorae/drivers/haproxy/__init__.py diff --git a/gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/amphorae/drivers/haproxy/data_models.py b/gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/amphorae/drivers/haproxy/data_models.py similarity index 97% rename from gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/amphorae/drivers/haproxy/data_models.py rename to gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/amphorae/drivers/haproxy/data_models.py index c1e028ce2f..f70b9305bd 100644 --- a/gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/amphorae/drivers/haproxy/data_models.py +++ b/gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/amphorae/drivers/haproxy/data_models.py @@ -12,7 +12,7 @@ # License for the specific language governing permissions and limitations # under the License. 
-from gbpservice.nfp.configurator.drivers.loadbalancer.v2.haproxy.octavia_lib.\ +from gbpservice.contrib.nfp.configurator.drivers.loadbalancer.v2.haproxy.octavia_lib.\ common import data_models as models diff --git a/gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/amphorae/drivers/haproxy/exceptions.py b/gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/amphorae/drivers/haproxy/exceptions.py similarity index 100% rename from gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/amphorae/drivers/haproxy/exceptions.py rename to gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/amphorae/drivers/haproxy/exceptions.py diff --git a/gbpservice/nfp/configurator/agents/__init__.py b/gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/__init__.py similarity index 100% rename from gbpservice/nfp/configurator/agents/__init__.py rename to gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/__init__.py diff --git a/gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/constants.py b/gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/constants.py similarity index 100% rename from gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/constants.py rename to gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/constants.py diff --git a/gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/data_models.py b/gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/data_models.py similarity index 99% rename from gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/data_models.py rename to gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/data_models.py index ec853aa6c5..445c17952f 
100644 --- a/gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/data_models.py +++ b/gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/data_models.py @@ -18,7 +18,7 @@ from sqlalchemy.orm import collections -from gbpservice.nfp.configurator.drivers.loadbalancer.v2.haproxy.octavia_lib.\ +from gbpservice.contrib.nfp.configurator.drivers.loadbalancer.v2.haproxy.octavia_lib.\ common import constants diff --git a/gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/exceptions.py b/gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/exceptions.py similarity index 97% rename from gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/exceptions.py rename to gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/exceptions.py index 4f552d112e..942f652072 100644 --- a/gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/exceptions.py +++ b/gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/exceptions.py @@ -20,9 +20,9 @@ from oslo_utils import excutils from webob import exc -from gbpservice.nfp.configurator.drivers.loadbalancer.v2.haproxy.octavia_lib.\ +from gbpservice.contrib.nfp.configurator.drivers.loadbalancer.v2.haproxy.octavia_lib.\ i18n import _LE -from gbpservice.nfp.configurator.drivers.loadbalancer.v2.haproxy.octavia_lib.\ +from gbpservice.contrib.nfp.configurator.drivers.loadbalancer.v2.haproxy.octavia_lib.\ i18n import _LI diff --git a/gbpservice/nfp/configurator/api/__init__.py b/gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/jinja/__init__.py similarity index 100% rename from gbpservice/nfp/configurator/api/__init__.py rename to gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/jinja/__init__.py diff --git 
a/gbpservice/nfp/configurator/api/v1/__init__.py b/gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/jinja/haproxy/__init__.py similarity index 100% rename from gbpservice/nfp/configurator/api/v1/__init__.py rename to gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/jinja/haproxy/__init__.py diff --git a/gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/jinja/haproxy/jinja_cfg.py b/gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/jinja/haproxy/jinja_cfg.py similarity index 98% rename from gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/jinja/haproxy/jinja_cfg.py rename to gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/jinja/haproxy/jinja_cfg.py index d0f0736ef7..454b52891f 100644 --- a/gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/jinja/haproxy/jinja_cfg.py +++ b/gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/jinja/haproxy/jinja_cfg.py @@ -19,9 +19,9 @@ import six from oslo_config import cfg -from gbpservice.nfp.configurator.drivers.loadbalancer.v2.haproxy.octavia_lib.\ +from gbpservice.contrib.nfp.configurator.drivers.loadbalancer.v2.haproxy.octavia_lib.\ common import constants -from gbpservice.nfp.configurator.drivers.loadbalancer.v2.haproxy.octavia_lib.\ +from gbpservice.contrib.nfp.configurator.drivers.loadbalancer.v2.haproxy.octavia_lib.\ common import utils as octavia_utils PROTOCOL_MAP = { diff --git a/gbpservice/nfp/configurator/config/__init__.py b/gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/jinja/haproxy/templates/__init__.py similarity index 100% rename from gbpservice/nfp/configurator/config/__init__.py rename to 
gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/jinja/haproxy/templates/__init__.py diff --git a/gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/jinja/haproxy/templates/base.j2 b/gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/jinja/haproxy/templates/base.j2 similarity index 100% rename from gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/jinja/haproxy/templates/base.j2 rename to gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/jinja/haproxy/templates/base.j2 diff --git a/gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/jinja/haproxy/templates/haproxy.cfg.j2 b/gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/jinja/haproxy/templates/haproxy.cfg.j2 similarity index 100% rename from gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/jinja/haproxy/templates/haproxy.cfg.j2 rename to gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/jinja/haproxy/templates/haproxy.cfg.j2 diff --git a/gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/jinja/haproxy/templates/macros.j2 b/gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/jinja/haproxy/templates/macros.j2 similarity index 100% rename from gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/jinja/haproxy/templates/macros.j2 rename to gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/jinja/haproxy/templates/macros.j2 diff --git a/gbpservice/nfp/configurator/drivers/__init__.py b/gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/tls_utils/__init__.py similarity index 100% rename from gbpservice/nfp/configurator/drivers/__init__.py rename to 
gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/tls_utils/__init__.py diff --git a/gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/tls_utils/cert_parser.py b/gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/tls_utils/cert_parser.py similarity index 96% rename from gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/tls_utils/cert_parser.py rename to gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/tls_utils/cert_parser.py index 776a4a70be..2038604b55 100644 --- a/gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/tls_utils/cert_parser.py +++ b/gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/tls_utils/cert_parser.py @@ -19,11 +19,11 @@ from oslo_log import log as logging import six -from gbpservice.nfp.configurator.drivers.loadbalancer.v2.haproxy.octavia_lib.\ +from gbpservice.contrib.nfp.configurator.drivers.loadbalancer.v2.haproxy.octavia_lib.\ common import data_models as data_models -from gbpservice.nfp.configurator.drivers.loadbalancer.v2.haproxy.octavia_lib.\ +from gbpservice.contrib.nfp.configurator.drivers.loadbalancer.v2.haproxy.octavia_lib.\ common import exceptions -from gbpservice.nfp.configurator.drivers.loadbalancer.v2.haproxy.octavia_lib.\ +from gbpservice.contrib.nfp.configurator.drivers.loadbalancer.v2.haproxy.octavia_lib.\ i18n import _LE diff --git a/gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/utils.py b/gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/utils.py similarity index 100% rename from gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/utils.py rename to gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/utils.py diff --git 
a/gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/i18n.py b/gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/i18n.py similarity index 100% rename from gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/i18n.py rename to gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/i18n.py diff --git a/gbpservice/nfp/configurator/drivers/base/__init__.py b/gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/network/__init__.py similarity index 100% rename from gbpservice/nfp/configurator/drivers/base/__init__.py rename to gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/network/__init__.py diff --git a/gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/network/data_models.py b/gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/network/data_models.py similarity index 97% rename from gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/network/data_models.py rename to gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/network/data_models.py index 9e74c0c9ed..124bba7037 100644 --- a/gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/network/data_models.py +++ b/gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/network/data_models.py @@ -12,7 +12,7 @@ # License for the specific language governing permissions and limitations # under the License. 
-from gbpservice.nfp.configurator.drivers.loadbalancer.v2.haproxy.octavia_lib.\ +from gbpservice.contrib.nfp.configurator.drivers.loadbalancer.v2.haproxy.octavia_lib.\ common import data_models diff --git a/gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/rest_api_driver.py b/gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/rest_api_driver.py similarity index 95% rename from gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/rest_api_driver.py rename to gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/rest_api_driver.py index 6010d84179..9ab9bd52e0 100644 --- a/gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/rest_api_driver.py +++ b/gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/rest_api_driver.py @@ -23,22 +23,22 @@ from oslo_config import cfg -from gbpservice.nfp.configurator.drivers.loadbalancer.v2.haproxy.octavia_lib.\ +from gbpservice.contrib.nfp.configurator.drivers.loadbalancer.v2.haproxy.octavia_lib.\ amphorae.driver_exceptions import exceptions as driver_except -from gbpservice.nfp.configurator.drivers.loadbalancer.v2.haproxy.octavia_lib.\ +from gbpservice.contrib.nfp.configurator.drivers.loadbalancer.v2.haproxy.octavia_lib.\ amphorae.drivers import driver_base as driver_base -from gbpservice.nfp.configurator.drivers.loadbalancer.v2.haproxy.octavia_lib.\ +from gbpservice.contrib.nfp.configurator.drivers.loadbalancer.v2.haproxy.octavia_lib.\ amphorae.drivers.haproxy import exceptions as exc # TODO(jiahao): drop vrrp temporarily -# from gbpservice.nfp.configurator.drivers.loadbalancer.v2.haproxy.octavia_lib. +# from gbpservice.contrib.nfp.configurator.drivers.loadbalancer.v2.haproxy.octavia_lib. 
# amphorae.drivers.keepalived import vrrp_rest_driver -from gbpservice.nfp.configurator.drivers.loadbalancer.v2.haproxy.octavia_lib.\ +from gbpservice.contrib.nfp.configurator.drivers.loadbalancer.v2.haproxy.octavia_lib.\ common.jinja.haproxy import jinja_cfg -from gbpservice.nfp.configurator.drivers.loadbalancer.v2.haproxy.octavia_lib.\ +from gbpservice.contrib.nfp.configurator.drivers.loadbalancer.v2.haproxy.octavia_lib.\ common import constants -from gbpservice.nfp.configurator.drivers.loadbalancer.v2.haproxy.octavia_lib.\ +from gbpservice.contrib.nfp.configurator.drivers.loadbalancer.v2.haproxy.octavia_lib.\ common.tls_utils import cert_parser -from gbpservice.nfp.configurator.drivers.loadbalancer.v2.haproxy.octavia_lib.\ +from gbpservice.contrib.nfp.configurator.drivers.loadbalancer.v2.haproxy.octavia_lib.\ i18n import _LW from gbpservice.nfp.core import log as nfp_logging diff --git a/gbpservice/nfp/configurator/drivers/firewall/__init__.py b/gbpservice/contrib/nfp/configurator/drivers/nfp_service/__init__.py similarity index 100% rename from gbpservice/nfp/configurator/drivers/firewall/__init__.py rename to gbpservice/contrib/nfp/configurator/drivers/nfp_service/__init__.py diff --git a/gbpservice/nfp/configurator/drivers/firewall/asav/__init__.py b/gbpservice/contrib/nfp/configurator/drivers/nfp_service/heat/__init__.py similarity index 100% rename from gbpservice/nfp/configurator/drivers/firewall/asav/__init__.py rename to gbpservice/contrib/nfp/configurator/drivers/nfp_service/heat/__init__.py diff --git a/gbpservice/nfp/configurator/drivers/nfp_service/heat/heat_driver.py b/gbpservice/contrib/nfp/configurator/drivers/nfp_service/heat/heat_driver.py similarity index 67% rename from gbpservice/nfp/configurator/drivers/nfp_service/heat/heat_driver.py rename to gbpservice/contrib/nfp/configurator/drivers/nfp_service/heat/heat_driver.py index fea3fe03bf..3ee2b242a5 100644 --- a/gbpservice/nfp/configurator/drivers/nfp_service/heat/heat_driver.py +++ 
b/gbpservice/contrib/nfp/configurator/drivers/nfp_service/heat/heat_driver.py @@ -13,22 +13,23 @@ from gbpservice.nfp.core import log as nfp_logging -from gbpservice.nfp.configurator.drivers.base import base_driver -from gbpservice.nfp.configurator.lib import nfp_service_constants as const +from gbpservice.contrib.nfp.configurator.drivers.base import base_driver +from gbpservice.contrib.nfp.configurator.lib import ( + nfp_service_constants as const) LOG = nfp_logging.getLogger(__name__) -""" Heat as a driver for handling config script -heat configuration requests. -We initialize service type in this class because agent loads -class object only for those driver classes that have service type -initialized. Also, only this driver class is exposed to the agent. +class HeatDriver(base_driver.BaseDriver): + """ Heat as a driver for handling config script + heat configuration requests. -""" + We initialize service type in this class because agent loads + class object only for those driver classes that have service type + initialized. Also, only this driver class is exposed to the agent. 
+ """ -class HeatDriver(base_driver.BaseDriver): service_type = const.SERVICE_TYPE resource_type = const.HEAT_RESOURCE diff --git a/gbpservice/nfp/configurator/drivers/firewall/paloalto/__init__.py b/gbpservice/contrib/nfp/configurator/drivers/vpn/__init__.py similarity index 100% rename from gbpservice/nfp/configurator/drivers/firewall/paloalto/__init__.py rename to gbpservice/contrib/nfp/configurator/drivers/vpn/__init__.py diff --git a/gbpservice/nfp/configurator/drivers/firewall/vyos/__init__.py b/gbpservice/contrib/nfp/configurator/drivers/vpn/vyos/__init__.py similarity index 100% rename from gbpservice/nfp/configurator/drivers/firewall/vyos/__init__.py rename to gbpservice/contrib/nfp/configurator/drivers/vpn/vyos/__init__.py diff --git a/gbpservice/nfp/configurator/api/v1/app.py b/gbpservice/contrib/nfp/configurator/drivers/vpn/vyos/vyos_vpn_constants.py similarity index 74% rename from gbpservice/nfp/configurator/api/v1/app.py rename to gbpservice/contrib/nfp/configurator/drivers/vpn/vyos/vyos_vpn_constants.py index f65e34406b..aa8c157a74 100644 --- a/gbpservice/nfp/configurator/api/v1/app.py +++ b/gbpservice/contrib/nfp/configurator/drivers/vpn/vyos/vyos_vpn_constants.py @@ -10,15 +10,9 @@ # License for the specific language governing permissions and limitations # under the License. 
-import pecan +SERVICE_VENDOR = 'vyos' +CONFIGURATION_SERVER_PORT = 8888 +request_url = "http://%s:%s/%s" -def setup_app(config): - - app_conf = dict(config.app) - - return pecan.make_app( - app_conf.pop('root'), - logging=getattr(config, 'logging', {}), - **app_conf - ) +REST_TIMEOUT = 90 diff --git a/gbpservice/nfp/configurator/drivers/vpn/vyos/vyos_vpn_driver.py b/gbpservice/contrib/nfp/configurator/drivers/vpn/vyos/vyos_vpn_driver.py similarity index 92% rename from gbpservice/nfp/configurator/drivers/vpn/vyos/vyos_vpn_driver.py rename to gbpservice/contrib/nfp/configurator/drivers/vpn/vyos/vyos_vpn_driver.py index 5a28cf0801..e948735ba3 100644 --- a/gbpservice/nfp/configurator/drivers/vpn/vyos/vyos_vpn_driver.py +++ b/gbpservice/contrib/nfp/configurator/drivers/vpn/vyos/vyos_vpn_driver.py @@ -14,8 +14,11 @@ import copy import requests -from gbpservice.nfp.configurator.drivers.base import base_driver -from gbpservice.nfp.configurator.lib import vpn_constants as const +from gbpservice.contrib.nfp.configurator.drivers.base import base_driver +from gbpservice.contrib.nfp.configurator.drivers.vpn.vyos import ( + vyos_vpn_constants as const) +from gbpservice.contrib.nfp.configurator.lib import constants as common_const +from gbpservice.contrib.nfp.configurator.lib import vpn_constants as vpn_const from gbpservice.nfp.core import log as nfp_logging from oslo_concurrency import lockutils @@ -37,8 +40,8 @@ class InvalidRsrcType(Exception): class ResourceErrorState(Exception): - message = "Resource '%(name)s' : '%(id)s' \ - went to error state, %(message)" + message = ("Resource '%(name)s' : '%(id)s' " + "went to error state, %(message)") class RestApi(object): @@ -234,7 +237,8 @@ def _error_state(self, context, vpnsvc, message=''): Returns: None """ self.agent.update_status( - context, self._update_service_status(vpnsvc, const.STATE_ERROR)) + context, self._update_service_status(vpnsvc, + vpn_const.STATE_ERROR)) raise ResourceErrorState(name='vpn_service', 
id=vpnsvc['id'], message=message) @@ -249,7 +253,8 @@ def _active_state(self, context, vpnsvc): Returns: None """ self.agent.update_status( - context, self._update_service_status(vpnsvc, const.STATE_ACTIVE)) + context, self._update_service_status(vpnsvc, + vpn_const.STATE_ACTIVE)) def _get_local_cidr(self, vpn_svc): svc_desc = vpn_svc['description'] @@ -290,14 +295,13 @@ def validate(self, context, vpnsvc): self._active_state(context, vpnsvc) -class VpnGenericConfigDriver(object): +class VpnGenericConfigDriver(base_driver.BaseDriver): """ VPN generic config driver for handling device configurations requests. This driver class implements VPN configuration. """ - def __init__(self, conf): - self.conf = conf + def __init__(self): self.timeout = const.REST_TIMEOUT def _configure_static_ips(self, resource_data): @@ -365,7 +369,7 @@ def _configure_static_ips(self, resource_data): msg = ("Static IPs successfully added.") LOG.info(msg) - return const.STATUS_SUCCESS + return common_const.STATUS_SUCCESS def configure_interfaces(self, context, resource_data): """ Configure interfaces for the service VM. @@ -381,6 +385,25 @@ def configure_interfaces(self, context, resource_data): Returns: SUCCESS/Failure message with reason. """ + mgmt_ip = resource_data['mgmt_ip'] + + try: + result_log_forward = self._configure_log_forwarding( + const.request_url, mgmt_ip, self.port) + except Exception as err: + msg = ("Failed to configure log forwarding for service at %s. " + "Error: %s" % (mgmt_ip, err)) + LOG.error(msg) + else: + if result_log_forward == common_const.UNHANDLED: + pass + elif result_log_forward != common_const.STATUS_SUCCESS: + # Failure in log forward configuration won't break chain + # creation. However, error will be logged for detecting + # failure. + msg = ("Failed to configure log forwarding for service at %s. 
" + "Error: %s" % (mgmt_ip, result_log_forward)) + LOG.error(msg) try: result_static_ips = self._configure_static_ips(resource_data) @@ -389,7 +412,7 @@ def configure_interfaces(self, context, resource_data): LOG.error(msg) return msg else: - if result_static_ips != const.STATUS_SUCCESS: + if result_static_ips != common_const.STATUS_SUCCESS: return result_static_ips else: msg = ("Added static IPs. Result: %s" % result_static_ips) @@ -399,8 +422,6 @@ def configure_interfaces(self, context, resource_data): provider_mac=resource_data['provider_mac'], stitching_mac=resource_data['stitching_mac']) - mgmt_ip = resource_data['mgmt_ip'] - url = const.request_url % (mgmt_ip, const.CONFIGURATION_SERVER_PORT, 'add_rule') data = jsonutils.dumps(rule_info) @@ -436,7 +457,7 @@ def configure_interfaces(self, context, resource_data): msg = ("Persistent rule successfully added.") LOG.info(msg) - return const.STATUS_SUCCESS + return common_const.STATUS_SUCCESS def _clear_static_ips(self, resource_data): """ Clear static IPs for provider and stitching @@ -498,7 +519,7 @@ def _clear_static_ips(self, resource_data): msg = ("Static IPs successfully removed.") LOG.info(msg) - return const.STATUS_SUCCESS + return common_const.STATUS_SUCCESS def clear_interfaces(self, context, resource_data): """ Clear interfaces for the service VM. @@ -522,7 +543,7 @@ def clear_interfaces(self, context, resource_data): LOG.error(msg) return msg else: - if result_static_ips != const.STATUS_SUCCESS: + if result_static_ips != common_const.STATUS_SUCCESS: return result_static_ips else: msg = ("Successfully removed static IPs. " @@ -570,7 +591,7 @@ def clear_interfaces(self, context, resource_data): raise Exception(msg) msg = ("Persistent rule successfully deleted.") LOG.info(msg) - return const.STATUS_SUCCESS + return common_const.STATUS_SUCCESS def configure_routes(self, context, resource_data): """ Configure routes for the service VM. 
@@ -589,10 +610,6 @@ def configure_routes(self, context, resource_data): source_cidrs = resource_data.get('source_cidrs') gateway_ip = resource_data.get('gateway_ip') - # REVISIT(VK): This was all along bad way, don't know why at all it - # was done like this. - - # adding stitching gateway route stitching_url = const.request_url % (mgmt_ip, const.CONFIGURATION_SERVER_PORT, 'add-stitching-route') @@ -632,7 +649,7 @@ def configure_routes(self, context, resource_data): LOG.error(msg) return msg - if resp.status_code in const.SUCCESS_CODES: + if resp.status_code in common_const.SUCCESS_CODES: message = jsonutils.loads(resp.text) if message.get("status", False): msg = ("Route configured successfully for VYOS" @@ -650,7 +667,7 @@ def configure_routes(self, context, resource_data): % (active_configured)) LOG.info(msg) if active_configured: - return const.STATUS_SUCCESS + return common_const.STATUS_SUCCESS else: return ("Failed to configure source route. Response code: %s." "Response Content: %r" % (resp.status_code, resp.content)) @@ -686,8 +703,6 @@ def clear_routes(self, context, resource_data): LOG.error(msg) return msg - # REVISIT(VK): This was all along bad way, don't know why at all it - # was done like this. active_configured = False url = const.request_url % (mgmt_ip, const.CONFIGURATION_SERVER_PORT, 'delete-source-route') @@ -712,30 +727,31 @@ def clear_routes(self, context, resource_data): LOG.error(msg) return msg - if resp.status_code in const.SUCCESS_CODES: + if resp.status_code in common_const.SUCCESS_CODES: active_configured = True msg = ("Route deletion status : %r " % (active_configured)) LOG.info(msg) if active_configured: - return const.STATUS_SUCCESS + return common_const.STATUS_SUCCESS else: return ("Failed to delete source route. Response code: %s." 
"Response Content: %r" % (resp.status_code, resp.content)) -class VpnaasIpsecDriver(VpnGenericConfigDriver, base_driver.BaseDriver): +class VpnaasIpsecDriver(VpnGenericConfigDriver): """ Driver class for implementing VPN IPSEC configuration requests from VPNaas Plugin. """ - service_type = const.SERVICE_TYPE + service_type = vpn_const.SERVICE_TYPE service_vendor = const.SERVICE_VENDOR def __init__(self, conf): self.conf = conf + self.port = const.CONFIGURATION_SERVER_PORT self.handlers = { 'vpn_service': { 'create': self.create_vpn_service}, @@ -743,7 +759,7 @@ def __init__(self, conf): 'create': self.create_ipsec_conn, 'update': self.update_ipsec_conn, 'delete': self.delete_ipsec_conn}} - super(VpnaasIpsecDriver, self).__init__(conf) + super(VpnaasIpsecDriver, self).__init__() def _update_conn_status(self, conn, status): """ @@ -781,7 +797,7 @@ def _error_state(self, context, conn, message=''): self.agent.update_status( context, self._update_conn_status(conn, - const.STATE_ERROR)) + vpn_const.STATE_ERROR)) raise ResourceErrorState(id=conn['id'], message=message) def _init_state(self, context, conn): @@ -798,11 +814,11 @@ def _init_state(self, context, conn): LOG.info(msg) self.agent.update_status( context, self._update_conn_status(conn, - const.STATE_INIT)) + vpn_const.STATE_INIT)) for item in context['service_info']['ipsec_site_conns']: if item['id'] == conn['id']: - item['status'] = const.STATE_INIT + item['status'] = vpn_const.STATE_INIT def _get_fip_from_vpnsvc(self, vpn_svc): svc_desc = vpn_svc['description'] @@ -820,11 +836,8 @@ def _get_ipsec_tunnel_local_cidr_from_vpnsvc(self, vpn_svc): return tunnel_local_cidr def _get_ipsec_tunnel_local_cidr(self, svc_context): - # Provider PTG is local cidr for the tunnel - # - which is passed in svc description as of now - return self.\ - _get_ipsec_tunnel_local_cidr_from_vpnsvc( - svc_context['service']) + return self._get_ipsec_tunnel_local_cidr_from_vpnsvc( + svc_context['service']) def _get_stitching_fixed_ip(self, 
conn): desc = conn['description'] @@ -881,8 +894,7 @@ def _ipsec_create_conn(self, context, mgmt_fip, conn): svc_context = self.agent.get_vpn_servicecontext( context, self._get_filters(conn_id=conn['id']))[0] - tunnel_local_cidr = self.\ - _get_ipsec_tunnel_local_cidr(svc_context) + tunnel_local_cidr = self._get_ipsec_tunnel_local_cidr(svc_context) conn = svc_context['siteconns'][0]['connection'] svc_context['siteconns'][0]['connection']['stitching_fixed_ip'] = ( self._get_stitching_fixed_ip(conn)) @@ -909,8 +921,7 @@ def _ipsec_create_tunnel(self, context, mgmt_fip, conn): svc_context = self.agent.get_vpn_servicecontext( context, self._get_filters(conn_id=conn['id']))[0] - tunnel_local_cidr = self.\ - _get_ipsec_tunnel_local_cidr(svc_context) + tunnel_local_cidr = self._get_ipsec_tunnel_local_cidr(svc_context) tunnel = {} tunnel['peer_address'] = conn['peer_address'] @@ -982,7 +993,7 @@ def _ipsec_get_tenant_conns(self, context, mgmt_fip, conn, copy_conns = copy.deepcopy(conn_list) for tconn in copy_conns: if tconn['status'] == ( - const.STATE_PENDING and tconn in conn_list): + vpn_const.STATE_PENDING and tconn in conn_list): conn_list.remove(tconn) return conn_list @@ -1014,8 +1025,7 @@ def _ipsec_delete_tunnel(self, context, mgmt_fip, Returns: None """ - lcidr = self.\ - _get_ipsec_tunnel_local_cidr_from_vpnsvc(conn) + lcidr = self._get_ipsec_tunnel_local_cidr_from_vpnsvc(conn) tunnel = {} tunnel['peer_address'] = conn['peer_address'] @@ -1066,9 +1076,8 @@ def _ipsec_is_state_changed(self, svc_context, conn, fip): """ c_state = None - lcidr = self.\ - _get_ipsec_tunnel_local_cidr(svc_context) - if conn['status'] == const.STATE_INIT: + lcidr = self._get_ipsec_tunnel_local_cidr(svc_context) + if conn['status'] == vpn_const.STATE_INIT: tunnel = { 'peer_address': conn['peer_address'], 'local_cidr': lcidr, @@ -1078,12 +1087,12 @@ def _ipsec_is_state_changed(self, svc_context, conn, fip): tunnel) state = output['state'] - if state.upper() == 'UP' and\ - conn['status'] 
!= const.STATE_ACTIVE: - c_state = const.STATE_ACTIVE - if state.upper() == 'DOWN' and\ - conn['status'] == const.STATE_ACTIVE: - c_state = const.STATE_PENDING + if state.upper() == 'UP' and ( + conn['status'] != vpn_const.STATE_ACTIVE): + c_state = vpn_const.STATE_ACTIVE + if state.upper() == 'DOWN' and ( + conn['status'] == vpn_const.STATE_ACTIVE): + c_state = vpn_const.STATE_PENDING if c_state: return c_state, True @@ -1268,19 +1277,3 @@ def _vpnservice_updated(context, resource_data): self.handlers[rsrc][reason](context, resource_data) return _vpnservice_updated(context, resource_data) - - def configure_healthmonitor(self, context, resource_data): - """Overriding BaseDriver's configure_healthmonitor(). - It does netcat to CONFIGURATION_SERVER_PORT 8888. - Configuration agent runs inside service vm.Once agent is up and - reachable, service vm is assumed to be active. - :param context - context - :param resource_data - resource_data coming from orchestrator - - Returns: SUCCESS/FAILED - - """ - ip = resource_data.get('mgmt_ip') - port = str(const.CONFIGURATION_SERVER_PORT) - command = 'nc ' + ip + ' ' + port + ' -z' - return self._check_vm_health(command) diff --git a/gbpservice/nfp/configurator/drivers/loadbalancer/__init__.py b/gbpservice/contrib/nfp/configurator/lib/__init__.py similarity index 100% rename from gbpservice/nfp/configurator/drivers/loadbalancer/__init__.py rename to gbpservice/contrib/nfp/configurator/lib/__init__.py diff --git a/gbpservice/nfp/configurator/lib/config_opts.py b/gbpservice/contrib/nfp/configurator/lib/config_opts.py similarity index 100% rename from gbpservice/nfp/configurator/lib/config_opts.py rename to gbpservice/contrib/nfp/configurator/lib/config_opts.py diff --git a/gbpservice/nfp/configurator/lib/constants.py b/gbpservice/contrib/nfp/configurator/lib/constants.py similarity index 92% rename from gbpservice/nfp/configurator/lib/constants.py rename to gbpservice/contrib/nfp/configurator/lib/constants.py index 
a607c0344b..b5316fa3c7 100644 --- a/gbpservice/nfp/configurator/lib/constants.py +++ b/gbpservice/contrib/nfp/configurator/lib/constants.py @@ -36,6 +36,8 @@ CREATE = 'create' UPDATE = 'update' DELETE = 'delete' +POST = 'post' +PUT = 'put' UNHANDLED = "UNHANDLED" SUCCESS_CODES = [200, 201, 202, 203, 204] @@ -47,3 +49,6 @@ STATUS_ERROR = "ERROR" STATUS_SUCCESS = "SUCCESS" UNHANDLED = "UNHANDLED" + +AGENTS_PKG = 'gbpservice.contrib.nfp.configurator.agents' +CONFIGURATOR_RPC_TOPIC = 'configurator' diff --git a/gbpservice/nfp/configurator/lib/data_filter.py b/gbpservice/contrib/nfp/configurator/lib/data_filter.py similarity index 98% rename from gbpservice/nfp/configurator/lib/data_filter.py rename to gbpservice/contrib/nfp/configurator/lib/data_filter.py index 2adb4f9ede..a5c417455a 100644 --- a/gbpservice/nfp/configurator/lib/data_filter.py +++ b/gbpservice/contrib/nfp/configurator/lib/data_filter.py @@ -12,13 +12,13 @@ import copy -from gbpservice.nfp.configurator.lib import ( +from gbpservice.contrib.nfp.configurator.lib import ( filter_constants as constants) class Filter(object): - """ - Filter class which provides data asked in a specific format. + """ Filter class which provides data asked in a specific format. + This class mocks all rpc calls going from *aaS agent/driver to respective *aaS plugin. 
""" @@ -35,6 +35,7 @@ def call(self, context, msg): Returns: data after applying filter on it """ + filters = {} try: for fk, fv in msg['args'].items(): if dict == type(fv): diff --git a/gbpservice/nfp/configurator/lib/demuxer.py b/gbpservice/contrib/nfp/configurator/lib/demuxer.py similarity index 83% rename from gbpservice/nfp/configurator/lib/demuxer.py rename to gbpservice/contrib/nfp/configurator/lib/demuxer.py index 44b829dfba..3c4dd80058 100644 --- a/gbpservice/nfp/configurator/lib/demuxer.py +++ b/gbpservice/contrib/nfp/configurator/lib/demuxer.py @@ -10,55 +10,55 @@ # License for the specific language governing permissions and limitations # under the License. -from gbpservice.nfp.configurator.lib import constants as const +from gbpservice.contrib.nfp.configurator.lib import constants as const from gbpservice.nfp.core import log as nfp_logging LOG = nfp_logging.getLogger(__name__) -"""Implements supporting methods for configurator module. -Provides methods that take configurator API request data and helps -configurator to de-multiplex the API calls to different service agents -and drivers. - -Format of request data for network device configuration API: -request_data { - info { - version: - } - config [ - { +class ServiceAgentDemuxer(object): + """Implements supporting methods for configurator module. + + Provides methods that take configurator API request data and helps + configurator to de-multiplex the API calls to different service agents + and drivers. + + Format of request data for network device configuration API: + request_data { + info { + version: + } + config [ + { + 'resource': , + 'kwargs': + }, + { 'resource': , 'kwargs': - }, - { - 'resource': , - 'kwargs': - }, ... - ] -} -Format of request data for network service configuration API: -request_data { - info { - version: - type: + }, ... 
+ ] } - config [ - { + Format of request data for network service configuration API: + request_data { + info { + version: + type: + } + config [ + { + 'resource': , + 'kwargs': + }, + { 'resource': , 'kwargs': - }, - { - 'resource': , - 'kwargs': - }, ... - ] -} - -""" + }, ... + ] + } + """ -class ServiceAgentDemuxer(object): def __init__(self): pass diff --git a/gbpservice/nfp/configurator/lib/filter_constants.py b/gbpservice/contrib/nfp/configurator/lib/filter_constants.py similarity index 100% rename from gbpservice/nfp/configurator/lib/filter_constants.py rename to gbpservice/contrib/nfp/configurator/lib/filter_constants.py diff --git a/gbpservice/nfp/configurator/lib/fw_constants.py b/gbpservice/contrib/nfp/configurator/lib/fw_constants.py similarity index 91% rename from gbpservice/nfp/configurator/lib/fw_constants.py rename to gbpservice/contrib/nfp/configurator/lib/fw_constants.py index d913f082cc..09df5ca694 100644 --- a/gbpservice/nfp/configurator/lib/fw_constants.py +++ b/gbpservice/contrib/nfp/configurator/lib/fw_constants.py @@ -10,7 +10,7 @@ # License for the specific language governing permissions and limitations # under the License. -DRIVERS_DIR = 'gbpservice.nfp.configurator.drivers.firewall' +DRIVERS_DIR = 'gbpservice.contrib.nfp.configurator.drivers.firewall' SERVICE_TYPE = 'firewall' FIREWALL_CREATE_EVENT = 'CREATE_FIREWALL' diff --git a/gbpservice/nfp/configurator/lib/generic_config_constants.py b/gbpservice/contrib/nfp/configurator/lib/generic_config_constants.py similarity index 94% rename from gbpservice/nfp/configurator/lib/generic_config_constants.py rename to gbpservice/contrib/nfp/configurator/lib/generic_config_constants.py index 8018a7a10f..995235dca1 100644 --- a/gbpservice/nfp/configurator/lib/generic_config_constants.py +++ b/gbpservice/contrib/nfp/configurator/lib/generic_config_constants.py @@ -10,7 +10,7 @@ # License for the specific language governing permissions and limitations # under the License. 
-DRIVERS_DIR = 'gbpservice.nfp.configurator.drivers' +DRIVERS_DIR = 'gbpservice.contrib.nfp.configurator.drivers' SERVICE_TYPE = 'generic_config' EVENT_CONFIGURE_INTERFACES = 'CONFIGURE_INTERFACES' EVENT_CLEAR_INTERFACES = 'CLEAR_INTERFACES' diff --git a/gbpservice/nfp/configurator/lib/lb_constants.py b/gbpservice/contrib/nfp/configurator/lib/lb_constants.py similarity index 84% rename from gbpservice/nfp/configurator/lib/lb_constants.py rename to gbpservice/contrib/nfp/configurator/lib/lb_constants.py index 1e82774697..331a547401 100644 --- a/gbpservice/nfp/configurator/lib/lb_constants.py +++ b/gbpservice/contrib/nfp/configurator/lib/lb_constants.py @@ -10,7 +10,7 @@ # License for the specific language governing permissions and limitations # under the License. -DRIVERS_DIR = 'gbpservice.nfp.configurator.drivers.loadbalancer.v1' +DRIVERS_DIR = 'gbpservice.contrib.nfp.configurator.drivers.loadbalancer.v1' SERVICE_TYPE = 'loadbalancer' NEUTRON = 'neutron' @@ -61,6 +61,24 @@ LBAAS = 'lbaas' +PROTOCOL_MAP = { + PROTOCOL_TCP: 'tcp', + PROTOCOL_HTTP: 'http', + PROTOCOL_HTTPS: 'https', +} +BALANCE_MAP = { + LB_METHOD_ROUND_ROBIN: 'roundrobin', + LB_METHOD_LEAST_CONNECTIONS: 'leastconn', + LB_METHOD_SOURCE_IP: 'source' +} +REQUEST_RETRIES = 0 +REQUEST_TIMEOUT = 120 + +# Operations +CREATE = 'create' +UPDATE = 'update' +DELETE = 'delete' + """ Event ids """ EVENT_CREATE_POOL = 'CREATE_POOL' EVENT_UPDATE_POOL = 'UPDATE_POOL' diff --git a/gbpservice/nfp/configurator/lib/lbv2_constants.py b/gbpservice/contrib/nfp/configurator/lib/lbv2_constants.py similarity index 97% rename from gbpservice/nfp/configurator/lib/lbv2_constants.py rename to gbpservice/contrib/nfp/configurator/lib/lbv2_constants.py index 56af89d8c0..575dc454ac 100644 --- a/gbpservice/nfp/configurator/lib/lbv2_constants.py +++ b/gbpservice/contrib/nfp/configurator/lib/lbv2_constants.py @@ -10,7 +10,7 @@ # License for the specific language governing permissions and limitations # under the License. 
-DRIVERS_DIR = 'gbpservice.nfp.configurator.drivers.loadbalancer.v2' +DRIVERS_DIR = 'gbpservice.contrib.nfp.configurator.drivers.loadbalancer.v2' SERVICE_TYPE = 'loadbalancerv2' NEUTRON = 'neutron' diff --git a/gbpservice/nfp/configurator/lib/nfp_service_constants.py b/gbpservice/contrib/nfp/configurator/lib/nfp_service_constants.py similarity index 91% rename from gbpservice/nfp/configurator/lib/nfp_service_constants.py rename to gbpservice/contrib/nfp/configurator/lib/nfp_service_constants.py index 853c0398e2..146a19253c 100644 --- a/gbpservice/nfp/configurator/lib/nfp_service_constants.py +++ b/gbpservice/contrib/nfp/configurator/lib/nfp_service_constants.py @@ -10,7 +10,7 @@ # License for the specific language governing permissions and limitations # under the License. -DRIVERS_DIR = 'gbpservice.nfp.configurator.drivers.nfp_service' +DRIVERS_DIR = 'gbpservice.contrib.nfp.configurator.drivers.nfp_service' SERVICE_TYPE = 'nfp_service' CREATE_NFP_SERVICE_EVENT = 'CREATE_NFP_SERVICE' UNHANDLED_RESULT = 'unhandled' diff --git a/gbpservice/nfp/configurator/lib/schema.py b/gbpservice/contrib/nfp/configurator/lib/schema.py similarity index 100% rename from gbpservice/nfp/configurator/lib/schema.py rename to gbpservice/contrib/nfp/configurator/lib/schema.py diff --git a/gbpservice/nfp/configurator/lib/schema_validator.py b/gbpservice/contrib/nfp/configurator/lib/schema_validator.py similarity index 90% rename from gbpservice/nfp/configurator/lib/schema_validator.py rename to gbpservice/contrib/nfp/configurator/lib/schema_validator.py index 1b18c40568..a25ee06a1e 100644 --- a/gbpservice/nfp/configurator/lib/schema_validator.py +++ b/gbpservice/contrib/nfp/configurator/lib/schema_validator.py @@ -12,18 +12,17 @@ from gbpservice.nfp.core import log as nfp_logging -from gbpservice.nfp.configurator.lib import constants as const -import gbpservice.nfp.configurator.lib.schema as schema +from gbpservice.contrib.nfp.configurator.lib import constants as const +import 
gbpservice.contrib.nfp.configurator.lib.schema as schema LOG = nfp_logging.getLogger(__name__) -""" Validates request data against standard resource schemas given in schema.py - - Validation is focused on keys. It cross checks if resources in - request_data has all the keys given in the schema of that resource. -""" - class SchemaValidator(object): + """ Validates request data against standard resource schemas given in schema.py + + Validation is focused on keys. It cross checks if resources in + request_data has all the keys given in the schema of that resource. + """ def decode(self, request_data, is_generic_config): """ Validate request data against resource schema. diff --git a/gbpservice/nfp/configurator/lib/utils.py b/gbpservice/contrib/nfp/configurator/lib/utils.py similarity index 95% rename from gbpservice/nfp/configurator/lib/utils.py rename to gbpservice/contrib/nfp/configurator/lib/utils.py index c55e99dd59..4fc866d650 100644 --- a/gbpservice/nfp/configurator/lib/utils.py +++ b/gbpservice/contrib/nfp/configurator/lib/utils.py @@ -17,12 +17,13 @@ LOG = nfp_logging.getLogger(__name__) -"""Utility class which provides common library functions for configurator. - New common library functions, if needed, should be added in this class. -""" - class ConfiguratorUtils(object): + """Utility class which provides common library functions for configurator. + + New common library functions, if needed, should be added in this class. + """ + def __init__(self): pass diff --git a/gbpservice/contrib/nfp/configurator/lib/vpn_constants.py b/gbpservice/contrib/nfp/configurator/lib/vpn_constants.py new file mode 100644 index 0000000000..6009e7b74c --- /dev/null +++ b/gbpservice/contrib/nfp/configurator/lib/vpn_constants.py @@ -0,0 +1,27 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +DRIVERS_DIR = 'gbpservice.contrib.nfp.configurator.drivers.vpn' + +SERVICE_TYPE = 'vpn' + + +STATE_PENDING = 'PENDING_CREATE' +STATE_INIT = 'INIT' +STATE_ACTIVE = 'ACTIVE' +STATE_ERROR = 'ERROR' + + +VPN_GENERIC_CONFIG_RPC_TOPIC = "vyos_vpn_topic" + +VPN_PLUGIN_TOPIC = 'vpn_plugin' +VPN_AGENT_TOPIC = 'vpn_agent' diff --git a/gbpservice/nfp/configurator/drivers/loadbalancer/v1/__init__.py b/gbpservice/contrib/nfp/configurator/modules/__init__.py similarity index 100% rename from gbpservice/nfp/configurator/drivers/loadbalancer/v1/__init__.py rename to gbpservice/contrib/nfp/configurator/modules/__init__.py diff --git a/gbpservice/nfp/configurator/modules/configurator.py b/gbpservice/contrib/nfp/configurator/modules/configurator.py similarity index 88% rename from gbpservice/nfp/configurator/modules/configurator.py rename to gbpservice/contrib/nfp/configurator/modules/configurator.py index 80d04a285b..d50528b99c 100644 --- a/gbpservice/nfp/configurator/modules/configurator.py +++ b/gbpservice/contrib/nfp/configurator/modules/configurator.py @@ -11,42 +11,32 @@ # under the License. 
from oslo_log import helpers as log_helpers -from gbpservice.nfp.core import log as nfp_logging -from gbpservice.nfp.configurator.lib import config_opts -from gbpservice.nfp.configurator.lib import constants as const -from gbpservice.nfp.configurator.lib import demuxer -from gbpservice.nfp.configurator.lib import schema_validator -from gbpservice.nfp.configurator.lib import utils +from gbpservice.contrib.nfp.configurator.lib import config_opts +from gbpservice.contrib.nfp.configurator.lib import constants as const +from gbpservice.contrib.nfp.configurator.lib import demuxer +from gbpservice.contrib.nfp.configurator.lib import schema_validator +from gbpservice.contrib.nfp.configurator.lib import utils from gbpservice.nfp.core import log as nfp_logging from gbpservice.nfp.core import rpc -from neutron.common import rpc as n_rpc -from neutron import context as n_context -import oslo_messaging -import time - LOG = nfp_logging.getLogger(__name__) -AGENTS_PKG = 'gbpservice.nfp.configurator.agents' -CONFIGURATOR_RPC_TOPIC = 'configurator' - -"""Implements procedure calls invoked by an REST server. - -Implements following RPC methods. - - create_network_function_device_config - - delete_network_function_device_config - - update_network_function_device_config - - create_network_function_config - - delete_network_function_config - - update_network_function_config - - get_notifications -Also implements local methods for supporting RPC methods - -""" - class ConfiguratorRpcManager(object): + """Implements procedure calls invoked by an REST server. + + Implements following RPC methods. 
+ - create_network_function_device_config + - delete_network_function_device_config + - update_network_function_device_config + - create_network_function_config + - delete_network_function_config + - update_network_function_config + - get_notifications + Also implements local methods for supporting RPC methods + + """ def __init__(self, sc, cm, conf, demuxer): self.sc = sc @@ -321,16 +311,15 @@ def get_notifications(self, context): LOG.info(msg) return notifications -"""Implements configurator module APIs. - - Implements methods which are either invoked by registered service agents - or by the configurator global methods. The methods invoked by configurator - global methods interface with service agents. -""" +class ConfiguratorModule(object): + """Implements configurator module APIs. + Implements methods which are either invoked by registered service + agents or by the configurator global methods. The methods invoked + by configurator global methods interface with service agents. -class ConfiguratorModule(object): + """ def __init__(self, sc): self.sa_instances = {} @@ -421,7 +410,7 @@ def init_rpc(sc, cm, conf, demuxer): # Initializes RPC client rpc_mgr = ConfiguratorRpcManager(sc, cm, conf, demuxer) configurator_agent = rpc.RpcAgent(sc, - topic=CONFIGURATOR_RPC_TOPIC, + topic=const.CONFIGURATOR_RPC_TOPIC, manager=rpc_mgr) # Registers RPC client object with core service controller @@ -439,7 +428,7 @@ def get_configurator_module_instance(sc): conf_utils = utils.ConfiguratorUtils() # Loads all the service agents under AGENT_PKG module path - cm.imported_sas = conf_utils.load_agents(AGENTS_PKG) + cm.imported_sas = conf_utils.load_agents(const.AGENTS_PKG) msg = ("Configurator loaded service agents from %s location." % (cm.imported_sas)) LOG.info(msg) @@ -495,12 +484,12 @@ def nfp_module_init(sc, conf): init_rpc(sc, cm, conf, demuxer_instance) except Exception as err: msg = ("Failed to initialize configurator RPC with topic %s. %s." 
- % (CONFIGURATOR_RPC_TOPIC, str(err).capitalize())) + % (const.CONFIGURATOR_RPC_TOPIC, str(err).capitalize())) LOG.error(msg) raise Exception(err) else: msg = ("Initialized configurator RPC with topic %s." - % CONFIGURATOR_RPC_TOPIC) + % const.CONFIGURATOR_RPC_TOPIC) LOG.debug(msg) @@ -520,24 +509,6 @@ def nfp_module_post_init(sc, conf): try: cm = get_configurator_module_instance(sc) cm.init_service_agents_complete(sc, conf) - - #TODO(Rahul):Need to generalize the following code in library. - context = n_context.Context('configurator', 'configrator') - uptime = time.strftime("%c") - request_data = {'eventdata': {'uptime': uptime, - 'module': 'configurator'}, - 'eventid': 'NFP_UP_TIME', - 'eventtype': 'NFP_CONTROLLER'} - API_VERSION = '1.0' - target = oslo_messaging.Target( - topic='visibility', - version=API_VERSION) - client = n_rpc.get_client(target) - cctxt = client.prepare(version=API_VERSION, - topic='visibility') - cctxt.cast(context, - 'network_function_event', request_data=request_data) - except Exception as err: msg = ("Failed to trigger initialization complete for configurator" " agent modules. %s." 
% (str(err).capitalize())) diff --git a/gbpservice/contrib/nfp/configurator/run.sh b/gbpservice/contrib/nfp/configurator/run.sh new file mode 100644 index 0000000000..73da244bcf --- /dev/null +++ b/gbpservice/contrib/nfp/configurator/run.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +service rabbitmq-server start +screen -dmS "configurator" /usr/bin/python2 /usr/bin/nfp --config-file=/etc/nfp_configurator.ini --log-file=/var/log/nfp/nfp_configurator.log +cd /usr/local/lib/python2.7/dist-packages/gbpservice/nfp/pecan/api/ +python setup.py develop +screen -dmS "pecan" pecan configurator_decider config.py --mode advanced +/bin/bash + diff --git a/gbpservice/nfp/configurator/drivers/loadbalancer/v1/haproxy/__init__.py b/gbpservice/contrib/nfp/service_plugins/__init__.py similarity index 100% rename from gbpservice/nfp/configurator/drivers/loadbalancer/v1/haproxy/__init__.py rename to gbpservice/contrib/nfp/service_plugins/__init__.py diff --git a/gbpservice/nfp/configurator/drivers/loadbalancer/v2/__init__.py b/gbpservice/contrib/nfp/service_plugins/firewall/__init__.py similarity index 100% rename from gbpservice/nfp/configurator/drivers/loadbalancer/v2/__init__.py rename to gbpservice/contrib/nfp/service_plugins/firewall/__init__.py diff --git a/gbpservice/nfp/service_plugins/firewall/nfp_fwaas_plugin-liberty.py b/gbpservice/contrib/nfp/service_plugins/firewall/nfp_fwaas_plugin-liberty.py similarity index 99% rename from gbpservice/nfp/service_plugins/firewall/nfp_fwaas_plugin-liberty.py rename to gbpservice/contrib/nfp/service_plugins/firewall/nfp_fwaas_plugin-liberty.py index 82c586b59e..0d50dddc09 100644 --- a/gbpservice/nfp/service_plugins/firewall/nfp_fwaas_plugin-liberty.py +++ b/gbpservice/contrib/nfp/service_plugins/firewall/nfp_fwaas_plugin-liberty.py @@ -16,7 +16,7 @@ from oslo_utils import uuidutils from sqlalchemy import orm -from gbpservice.nfp.config_orchestrator.common import topics +from gbpservice.contrib.nfp.config_orchestrator.common import topics import 
neutron_fwaas.extensions from neutron_fwaas.services.firewall import fwaas_plugin as ref_fw_plugin diff --git a/gbpservice/nfp/service_plugins/firewall/nfp_fwaas_plugin.py b/gbpservice/contrib/nfp/service_plugins/firewall/nfp_fwaas_plugin.py similarity index 81% rename from gbpservice/nfp/service_plugins/firewall/nfp_fwaas_plugin.py rename to gbpservice/contrib/nfp/service_plugins/firewall/nfp_fwaas_plugin.py index 1584f3d6ab..b2adf780ea 100644 --- a/gbpservice/nfp/service_plugins/firewall/nfp_fwaas_plugin.py +++ b/gbpservice/contrib/nfp/service_plugins/firewall/nfp_fwaas_plugin.py @@ -1,27 +1,38 @@ -from neutron.api.v2 import attributes as attr +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from keystoneclient import exceptions as k_exceptions +from keystoneclient.v2_0 import client as keyclient + +from gbpservice.contrib.nfp.config_orchestrator.common import topics +from gbpservice.common import utils +import netaddr from neutron import context as neutron_context +from neutron.api.v2 import attributes as attr from neutron.common import constants as l3_constants -from neutron import manager -#from neutron.common import common as n_topics from neutron.common import exceptions as n_exc -from neutron.db import models_v2 from neutron.db import l3_db -from neutron.db.l3_db import ( - RouterPort, EXTERNAL_GW_INFO, DEVICE_OWNER_ROUTER_INTF) -from neutron.plugins.common import constants as n_const -import netaddr -from oslo_config import cfg -from oslo_utils import uuidutils -from sqlalchemy import orm +from neutron.db.l3_db import DEVICE_OWNER_ROUTER_INTF +from neutron.db.l3_db import EXTERNAL_GW_INFO +from neutron.db.l3_db import RouterPort +from neutron.db import models_v2 +from neutron.extensions import l3 -from gbpservice.nfp.config_orchestrator.common import topics import neutron_fwaas.extensions from neutron_fwaas.services.firewall import fwaas_plugin as ref_fw_plugin - -from neutron_fwaas.db.firewall import ( - firewall_router_insertion_db as ref_fw_router_ins_db) -from neutron_fwaas.db.firewall import firewall_db as n_firewall +from oslo_config import cfg +from oslo_utils import excutils +from sqlalchemy import orm class NFPFirewallPlugin(ref_fw_plugin.FirewallPlugin): @@ -92,6 +103,9 @@ def _is_net_reachable_from_net(self, context, tenant_id, from_net_id, @param to_net_id: the destination network for the search @return: True or False whether a path exists """ + original_context = context + context = elevate_context(context) + tenant_id = context.tenant_id def nexthop_nets_query(nets, visited): """query networks connected to devices on nets but not visited.""" Port = models_v2.Port @@ -107,11 +121,14 @@ def nexthop_nets_query(nets, visited): nets 
= set([from_net_id]) while nets: if to_net_id in nets: + context = original_context return True visited |= nets nets = set((tup[0] for tup in nexthop_nets_query(nets, visited))) + context = original_context return False + def _find_net_for_nexthop(self, context, tenant_id, router_id, nexthop): """Find the network to which the nexthop belongs. @@ -123,7 +140,7 @@ def _find_net_for_nexthop(self, context, tenant_id, router_id, nexthop): @return: the network id of the nexthop or None if not found """ interfaces = context.session.query(models_v2.Port).filter_by( - # tenant_id=tenant_id, + tenant_id=tenant_id, device_id=router_id, device_owner=DEVICE_OWNER_ROUTER_INTF) for interface in interfaces: @@ -133,6 +150,7 @@ def _find_net_for_nexthop(self, context, tenant_id, router_id, nexthop): if netaddr.all_matching_cidrs(nexthop, cidrs): return interface['network_id'] + def _find_routers_via_routes_for_floatingip(self, context, internal_port, internal_subnet_id, external_network_id): @@ -153,6 +171,8 @@ def _find_routers_via_routes_for_floatingip(self, context, internal_port, @param external_network_id: the network of the floatingip @return: a sorted list of matching routers """ + original_context = context + context = elevate_context(context) internal_ip_address = [ ip['ip_address'] for ip in internal_port['fixed_ips'] if ip['subnet_id'] == internal_subnet_id @@ -168,7 +188,7 @@ def _find_routers_via_routes_for_floatingip(self, context, internal_port, gw_info = router.get(EXTERNAL_GW_INFO) if not gw_info or gw_info['network_id'] != external_network_id: continue - # find a matching route + # find a matching route if 'routes' not in router: continue cidr_nexthops = {} @@ -184,20 +204,39 @@ def _find_routers_via_routes_for_floatingip(self, context, internal_port, continue # validate that there exists a path to "internal_port" for nexthop in cidr_nexthops[smallest_cidr]: - net_id = self._find_net_for_nexthop(context, tenant_id, + net_id = self._find_net_for_nexthop(context, 
context.tenant_id, router['id'], nexthop) if net_id and self._is_net_reachable_from_net( context, - tenant_id, + context.tenant_id, net_id, internal_port['network_id']): prefix_routers.append( (smallest_cidr.prefixlen, router['id'])) break - - + context = original_context return [p_r[1] for p_r in sorted(prefix_routers, reverse=True)] +def elevate_context(context): + context = context.elevated() + context.tenant_id = _resource_owner_tenant_id() + return context + + +def _resource_owner_tenant_id(): + user, pwd, tenant, auth_url = utils.get_keystone_creds() + keystoneclient = keyclient.Client(username=user, password=pwd, + auth_url=auth_url) + try: + tenant = keystoneclient.tenants.find(name=tenant) + return tenant.id + except k_exceptions.NotFound: + with excutils.save_and_reraise_exception(reraise=True): + LOG.error(_LE('No tenant with name %s exists.'), tenant) + except k_exceptions.NoUniqueMatch: + with excutils.save_and_reraise_exception(reraise=True): + LOG.error(_LE('Multiple tenants matches found for %s'), tenant) + def _get_router_for_floatingip(self, context, internal_port, internal_subnet_id, diff --git a/gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/__init__.py b/gbpservice/contrib/nfp/service_plugins/loadbalancer/__init__.py similarity index 100% rename from gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/__init__.py rename to gbpservice/contrib/nfp/service_plugins/loadbalancer/__init__.py diff --git a/gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/__init__.py b/gbpservice/contrib/nfp/service_plugins/loadbalancer/drivers/__init__.py similarity index 100% rename from gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/__init__.py rename to gbpservice/contrib/nfp/service_plugins/loadbalancer/drivers/__init__.py diff --git a/gbpservice/contrib/nfp/service_plugins/loadbalancer/drivers/nfp_lbaas_plugin_driver.py 
b/gbpservice/contrib/nfp/service_plugins/loadbalancer/drivers/nfp_lbaas_plugin_driver.py new file mode 100644 index 0000000000..7d1c6fd9d1 --- /dev/null +++ b/gbpservice/contrib/nfp/service_plugins/loadbalancer/drivers/nfp_lbaas_plugin_driver.py @@ -0,0 +1,29 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from gbpservice.contrib.nfp.config_orchestrator.common import topics +from gbpservice.contrib.nfp.configurator.drivers.loadbalancer.v1.haproxy\ + import haproxy_lb_driver +from neutron_lbaas.services.loadbalancer.drivers.common import ( + agent_driver_base as adb +) + + +class HaproxyOnVMPluginDriver(adb.AgentDriverBase): + device_driver = haproxy_lb_driver.DRIVER_NAME + + def __init__(self, plugin): + # Monkey patch LB agent topic and LB agent type + adb.l_const.LOADBALANCER_AGENT = topics.LB_NFP_CONFIGAGENT_TOPIC + adb.q_const.AGENT_TYPE_LOADBALANCER = 'NFP Loadbalancer agent' + + super(HaproxyOnVMPluginDriver, self).__init__(plugin) diff --git a/gbpservice/nfp/service_plugins/loadbalancer/drivers/nfp_lbaasv2_plugin_driver.py b/gbpservice/contrib/nfp/service_plugins/loadbalancer/drivers/nfp_lbaasv2_plugin_driver.py similarity index 86% rename from gbpservice/nfp/service_plugins/loadbalancer/drivers/nfp_lbaasv2_plugin_driver.py rename to gbpservice/contrib/nfp/service_plugins/loadbalancer/drivers/nfp_lbaasv2_plugin_driver.py index a818f3357c..aa6b50cb98 100644 --- a/gbpservice/nfp/service_plugins/loadbalancer/drivers/nfp_lbaasv2_plugin_driver.py +++ 
b/gbpservice/contrib/nfp/service_plugins/loadbalancer/drivers/nfp_lbaasv2_plugin_driver.py @@ -10,10 +10,9 @@ # License for the specific language governing permissions and limitations # under the License. -from gbpservice.nfp.config_orchestrator.common import topics -from gbpservice.nfp.configurator.drivers.loadbalancer.v2.haproxy import ( - haproxy_driver -) +from gbpservice.contrib.nfp.config_orchestrator.common import topics +from gbpservice.contrib.nfp.configurator.drivers.loadbalancer.v2.haproxy\ + import haproxy_driver from neutron_lbaas.drivers.common import agent_driver_base as adb diff --git a/gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/amphorae/__init__.py b/gbpservice/contrib/nfp/service_plugins/vpn/__init__.py similarity index 100% rename from gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/amphorae/__init__.py rename to gbpservice/contrib/nfp/service_plugins/vpn/__init__.py diff --git a/gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/amphorae/driver_exceptions/__init__.py b/gbpservice/contrib/nfp/service_plugins/vpn/drivers/__init__.py similarity index 100% rename from gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/amphorae/driver_exceptions/__init__.py rename to gbpservice/contrib/nfp/service_plugins/vpn/drivers/__init__.py diff --git a/gbpservice/nfp/service_plugins/vpn/drivers/nfp_vpnaas_driver.py b/gbpservice/contrib/nfp/service_plugins/vpn/drivers/nfp_vpnaas_driver.py similarity index 93% rename from gbpservice/nfp/service_plugins/vpn/drivers/nfp_vpnaas_driver.py rename to gbpservice/contrib/nfp/service_plugins/vpn/drivers/nfp_vpnaas_driver.py index 0a1211d7cc..323a5bde47 100644 --- a/gbpservice/nfp/service_plugins/vpn/drivers/nfp_vpnaas_driver.py +++ b/gbpservice/contrib/nfp/service_plugins/vpn/drivers/nfp_vpnaas_driver.py @@ -13,21 +13,20 @@ import socket import time -from gbpservice.nfp.config_orchestrator.common import topics -from neutron_lib 
import exceptions +from gbpservice.contrib.nfp.config_orchestrator.common import topics +from gbpservice.nfp.core import log as nfp_logging from neutron.common import rpc as n_rpc from neutron.db import agents_db from neutron.db import agentschedulers_db from neutron import manager -from neutron_vpnaas.services.vpn.plugin import VPNPlugin +from neutron_lib import exceptions from neutron_vpnaas.services.vpn.plugin import VPNDriverPlugin +from neutron_vpnaas.services.vpn.plugin import VPNPlugin from neutron_vpnaas.services.vpn.service_drivers import base_ipsec -from oslo_log import log as logging import oslo_messaging -LOG = logging.getLogger(__name__) - +LOG = nfp_logging.getLogger(__name__) BASE_VPN_VERSION = '1.0' AGENT_TYPE_VPN = 'NFP Vpn agent' ACTIVE = 'ACTIVE' @@ -93,7 +92,7 @@ def _is_agent_hosting_vpnservice(self, agent): def _get_agent_hosting_vpnservice(self, admin_context, vpnservice_id): filters = {'agent_type': [AGENT_TYPE_VPN]} agents = manager.NeutronManager.get_plugin().get_agents( - admin_context, filters=filters) + admin_context, filters=filters) try: for agent in agents: @@ -109,10 +108,11 @@ def _get_agent_hosting_vpnservice(self, admin_context, vpnservice_id): if not agent['alive']: continue return agent - except: + except Exception: raise VPNAgentNotFound() - LOG.error(_('No active vpn agent found. Configuration will fail.')) + msg = ('No active vpn agent found. 
Configuration will fail.') + LOG.error(msg) raise VPNAgentHostingServiceNotFound(vpnservice_id=vpnservice_id) def _agent_notification(self, context, method, vpnservice_id, @@ -124,10 +124,12 @@ def _agent_notification(self, context, method, vpnservice_id, vpn_agent = self._get_agent_hosting_vpnservice( admin_context, vpnservice_id) - LOG.debug(_('Notify agent at %(topic)s.%(host)s the message ' - '%(method)s %(args)s'), { - 'topic': self.topic, 'host': vpn_agent['host'], - 'method': method, 'args': kwargs}) + msg = (('Notify agent at %(topic)s.%(host)s the message ' + '%(method)s %(args)s') + % {'topic': self.topic, + 'host': vpn_agent['host'], + 'method': method, 'args': kwargs}) + LOG.debug(msg) cctxt = self.client.prepare(server=vpn_agent['host'], version=version) @@ -141,8 +143,9 @@ def vpnservice_updated(self, context, vpnservice_id, **kwargs): self._agent_notification( context, 'vpnservice_updated', vpnservice_id, **kwargs) - except: - LOG.error(_('Notifying agent failed')) + except Exception: + msg = ('Notifying agent failed') + LOG.error(msg) class NFPIPsecVPNDriver(base_ipsec.BaseIPsecVPNDriver): diff --git a/gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/amphorae/drivers/__init__.py b/gbpservice/contrib/tests/__init__.py similarity index 100% rename from gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/amphorae/drivers/__init__.py rename to gbpservice/contrib/tests/__init__.py diff --git a/gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/amphorae/drivers/haproxy/__init__.py b/gbpservice/contrib/tests/unit/__init__.py similarity index 100% rename from gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/amphorae/drivers/haproxy/__init__.py rename to gbpservice/contrib/tests/unit/__init__.py diff --git a/gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/__init__.py b/gbpservice/contrib/tests/unit/nfp/__init__.py similarity index 100% rename 
from gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/__init__.py rename to gbpservice/contrib/tests/unit/nfp/__init__.py diff --git a/gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/jinja/__init__.py b/gbpservice/contrib/tests/unit/nfp/config_orchestrator/__init__.py similarity index 100% rename from gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/jinja/__init__.py rename to gbpservice/contrib/tests/unit/nfp/config_orchestrator/__init__.py diff --git a/gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/jinja/haproxy/__init__.py b/gbpservice/contrib/tests/unit/nfp/config_orchestrator/modules/__init__.py similarity index 100% rename from gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/jinja/haproxy/__init__.py rename to gbpservice/contrib/tests/unit/nfp/config_orchestrator/modules/__init__.py diff --git a/gbpservice/neutron/tests/unit/nfp/config_orchestrator/modules/test_config_orch.py b/gbpservice/contrib/tests/unit/nfp/config_orchestrator/modules/test_config_orch.py similarity index 95% rename from gbpservice/neutron/tests/unit/nfp/config_orchestrator/modules/test_config_orch.py rename to gbpservice/contrib/tests/unit/nfp/config_orchestrator/modules/test_config_orch.py index 34c163865c..ae43ac9450 100644 --- a/gbpservice/neutron/tests/unit/nfp/config_orchestrator/modules/test_config_orch.py +++ b/gbpservice/contrib/tests/unit/nfp/config_orchestrator/modules/test_config_orch.py @@ -10,20 +10,21 @@ # License for the specific language governing permissions and limitations # under the License. 
-from gbpservice.nfp.config_orchestrator.handlers.config import ( +import mock +import uuid + +from gbpservice.contrib.nfp.config_orchestrator.common import common +from gbpservice.contrib.nfp.config_orchestrator.handlers.config import ( firewall) -from gbpservice.nfp.config_orchestrator.handlers.config import ( +from gbpservice.contrib.nfp.config_orchestrator.handlers.config import ( loadbalancer) -from gbpservice.nfp.config_orchestrator.handlers.config import vpn -from gbpservice.nfp.config_orchestrator.handlers.notification import ( +from gbpservice.contrib.nfp.config_orchestrator.handlers.config import vpn +from gbpservice.contrib.nfp.config_orchestrator.handlers.notification import ( handler as notif_handler) - -from gbpservice.nfp.config_orchestrator.common import common from gbpservice.nfp.lib import transport -import mock + from neutron import context as ctx -import unittest -import uuid +from neutron.tests import base class TestContext(object): @@ -181,9 +182,10 @@ def _check_resource_header_data(self, rsrc_name, data, resource): return mod_method(data, resource) -class FirewallTestCase(unittest.TestCase): +class FirewallTestCase(base.BaseTestCase): def setUp(self): + super(FirewallTestCase, self).setUp() self.conf = Conf() self.fw_handler = firewall.FwAgent(self.conf, 'sc') self.context = TestContext().get_context() @@ -254,9 +256,10 @@ def test_delete_firewall(self): self.fw_handler.delete_firewall(self.context, self.fw, self.host) -class LoadBalanceTestCase(unittest.TestCase): +class LoadBalanceTestCase(base.BaseTestCase): def setUp(self): + super(LoadBalanceTestCase, self).setUp() self.conf = Conf() self.lb_handler = loadbalancer.LbAgent(self.conf, 'sc') self.context = TestContext().get_context() @@ -286,7 +289,7 @@ def _cast_loadbalancer(self, conf, context, body, def _call_to_get_network_function_desc(self): data = call_network_function_info() data['network_function']['description'] = ("\n" + str( - {'service_vendor': 'xyz'})) + {'service_vendor': 
'xyz'})) return data['network_function'] def _call_data(self, context, method, **kwargs): @@ -583,9 +586,10 @@ def test_delete_pool_health_monitor(self): self.context, hm, pool_id) -class VPNTestCase(unittest.TestCase): +class VPNTestCase(base.BaseTestCase): def setUp(self): + super(VPNTestCase, self).setUp() self.conf = Conf() self.vpn_handler = vpn.VpnAgent(self.conf, 'sc') self.context = TestContext().get_context() @@ -611,8 +615,8 @@ def _cast_vpn(self, conf, context, body, def _call_data(self, context, method, **kwargs): if method.lower() == "get_network_function_details": data = call_network_function_info() - data['network_function']['description'] = ("\n" + - ("ipsec_site_connection_id=%s;service_vendor=xyz" % ( + data['network_function']['description'] = ("\n" + ( + "ipsec_site_connection_id=%s;service_vendor=xyz" % ( str(uuid.uuid4())))) return data['network_function'] @@ -639,8 +643,8 @@ def _prepare_request_data(self, reason, rsrc_type): def _call_to_get_network_function_desc(self): data = call_network_function_info() - data['network_function']['description'] = ("\n" + - ("ipsec_site_connection_id=%s;service_vendor=xyz" % ( + data['network_function']['description'] = ("\n" + ( + "ipsec_site_connection_id=%s;service_vendor=xyz" % ( str(uuid.uuid4())))) return data['network_function'] @@ -704,7 +708,7 @@ def test_update_vpnservice_for_ipsec_site_connection(self): self.vpn_handler.vpnservice_updated(self.context, **kwargs) -class FirewallNotifierTestCase(unittest.TestCase): +class FirewallNotifierTestCase(base.BaseTestCase): class Controller(object): @@ -715,6 +719,7 @@ def post_event(self, event): return def setUp(self): + super(FirewallNotifierTestCase, self).setUp() self.conf = Conf() self.n_handler = notif_handler.NaasNotificationHandler( self.conf, self.Controller()) @@ -745,7 +750,7 @@ def get_notification_data(self): def test_set_firewall_status(self): notification_data = self.get_notification_data() rpc_client = self._get_rpc_client() - 
transport.RPCClient = mock.MagicMock(return_value = rpc_client) + transport.RPCClient = mock.MagicMock(return_value=rpc_client) self.n_handler.handle_notification(self.context, notification_data) @@ -754,12 +759,12 @@ def test_set_firewall_deleted(self): notification_data['notification'][0]['data'][ 'notification_type'] = 'firewall_deleted' rpc_client = self._get_rpc_client() - transport.RPCClient = mock.MagicMock(return_value = rpc_client) + transport.RPCClient = mock.MagicMock(return_value=rpc_client) self.n_handler.handle_notification(self.context, notification_data) -class LoadbalancerNotifierTestCase(unittest.TestCase): +class LoadbalancerNotifierTestCase(base.BaseTestCase): class Controller(object): @@ -770,6 +775,7 @@ def post_event(self, event): return def setUp(self): + super(LoadbalancerNotifierTestCase, self).setUp() self.conf = Conf() self.n_handler = notif_handler.NaasNotificationHandler( self.conf, self.Controller()) @@ -778,7 +784,7 @@ def setUp(self): def _get_rpc_client(self): class Context(object): def cast(self, context, method, host='', pool_id='', - stats ='', body=''): + stats='', body=''): return {} class RCPClient(object): @@ -802,7 +808,7 @@ def get_notification_data(self): def test_update_status(self): notification_data = self.get_notification_data() rpc_client = self._get_rpc_client() - transport.RPCClient = mock.MagicMock(return_value = rpc_client) + transport.RPCClient = mock.MagicMock(return_value=rpc_client) self.n_handler.handle_notification(self.context, notification_data) @@ -811,12 +817,12 @@ def test_update_pool_stats(self): notification_data['notification'][0]['data'][ 'notification_type'] = 'update_pool_stats' rpc_client = self._get_rpc_client() - transport.RPCClient = mock.MagicMock(return_value = rpc_client) + transport.RPCClient = mock.MagicMock(return_value=rpc_client) self.n_handler.handle_notification(self.context, notification_data) -class VpnNotifierTestCase(unittest.TestCase): +class 
VpnNotifierTestCase(base.BaseTestCase): class Controller(object): @@ -827,6 +833,7 @@ def post_event(self, event): return def setUp(self): + super(VpnNotifierTestCase, self).setUp() self.conf = Conf() self.n_handler = notif_handler.NaasNotificationHandler( self.conf, self.Controller()) @@ -857,10 +864,6 @@ def get_notification_data(self): def test_update_status(self): notification_data = self.get_notification_data() rpc_client = self._get_rpc_client() - transport.RPCClient = mock.MagicMock(return_value = rpc_client) + transport.RPCClient = mock.MagicMock(return_value=rpc_client) self.n_handler.handle_notification(self.context, notification_data) - - -if __name__ == '__main__': - unittest.main() diff --git a/gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/jinja/haproxy/templates/__init__.py b/gbpservice/contrib/tests/unit/nfp/configurator/__init__.py similarity index 100% rename from gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/jinja/haproxy/templates/__init__.py rename to gbpservice/contrib/tests/unit/nfp/configurator/__init__.py diff --git a/gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/tls_utils/__init__.py b/gbpservice/contrib/tests/unit/nfp/configurator/advanced_controller/__init__.py similarity index 100% rename from gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/tls_utils/__init__.py rename to gbpservice/contrib/tests/unit/nfp/configurator/advanced_controller/__init__.py diff --git a/gbpservice/neutron/tests/unit/nfp/configurator/api/v1/controllers/test_controller.py b/gbpservice/contrib/tests/unit/nfp/configurator/advanced_controller/test_controller.py similarity index 82% rename from gbpservice/neutron/tests/unit/nfp/configurator/api/v1/controllers/test_controller.py rename to gbpservice/contrib/tests/unit/nfp/configurator/advanced_controller/test_controller.py index 2d5b178a1c..3c85335a87 100644 --- 
a/gbpservice/neutron/tests/unit/nfp/configurator/api/v1/controllers/test_controller.py +++ b/gbpservice/contrib/tests/unit/nfp/configurator/advanced_controller/test_controller.py @@ -15,31 +15,36 @@ import os import oslo_serialization.jsonutils as jsonutils import pecan -PECAN_CONFIG_FILE = os.getcwd() + "/gbpservice/nfp/configurator/api/config.py" +PECAN_CONFIG_FILE = (os.getcwd() + + "/gbpservice/nfp/pecan/api/config.py") + pecan.set_config(PECAN_CONFIG_FILE, overwrite=True) -import unittest + import webtest import zlib +from neutron.tests import base from pecan import rest -from gbpservice.nfp.configurator.api import root_controller -from gbpservice.nfp.configurator.api.v1.controllers import controller +from gbpservice.nfp.pecan import constants +setattr(pecan, 'mode', constants.advanced) -"""This class contains all the unittest cases for REST server of configurator. +from gbpservice.contrib.nfp.configurator.advanced_controller import controller +from gbpservice.nfp.pecan.api import root_controller -This class tests success and failure cases for all the HTTP requests which -are implemented in REST server. run_tests.sh file is used for running all -the tests in this class. All the methods of this class started with test -prefix called and on success it will print ok and on failure it will -print the error trace. -""" +class ControllerTestCase(base.BaseTestCase, rest.RestController): + """ + This class contains all the unittest cases for REST server of configurator. + This class tests success and failure cases for all the HTTP requests which + are implemented in REST server. run_tests.sh file is used for running all + the tests in this class. All the methods of this class started with test + prefix called and on success it will print ok and on failure it will + print the error trace. 
-class ControllerTestCase(unittest.TestCase, rest.RestController): - + """ @classmethod def setUpClass(cls): """A class method called before tests in an individual class run @@ -172,41 +177,6 @@ def test_put_update_network_function_config(self): 'update_network_function_config', self.data) self.assertEqual(response.status_code, 200) - def test_call(self): - """Tests call function of RPCClient. - - Returns: none - - """ - rpcclient = controller.RPCClient('topic_name') - with mock.patch.object( - rpcclient.client, 'call') as rpc_mock,\ - mock.patch.object( - rpcclient.client, 'prepare') as ( - prepare_mock): - prepare_mock.return_value = rpcclient.client - rpc_mock.return_value = True - value = rpcclient.call('rpc_method_name') - self.assertTrue(value) - - def test_cast(self): - """Tests cast function of RPCClient. - - Returns: none - - """ - rpcclient = controller.RPCClient('topic_name') - with mock.patch.object( - rpcclient.client, 'cast') as rpc_mock,\ - mock.patch.object( - rpcclient.client, 'prepare') as ( - prepare_mock): - prepare_mock.return_value = rpcclient.client - rpc_mock.return_value = True - value = rpcclient.cast('rpc_method_name', - jsonutils.dumps(self.data)) - self.assertTrue(value) - def test_post_create_network_function_device_config_fail(self): """Tests failure case of HTTP post request create_network_function_device_config @@ -302,7 +272,3 @@ def test_put_update_network_function_config_fail(self): '/v1/nfp/update_network_function_config', expect_errors=True) self.assertEqual(response.status_code, 400) - - -if __name__ == '__main__': - unittest.main() diff --git a/gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/network/__init__.py b/gbpservice/contrib/tests/unit/nfp/configurator/agents/__init__.py similarity index 100% rename from gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/network/__init__.py rename to gbpservice/contrib/tests/unit/nfp/configurator/agents/__init__.py diff --git 
a/gbpservice/neutron/tests/unit/nfp/configurator/agents/test_firewall.py b/gbpservice/contrib/tests/unit/nfp/configurator/agents/test_firewall.py similarity index 75% rename from gbpservice/neutron/tests/unit/nfp/configurator/agents/test_firewall.py rename to gbpservice/contrib/tests/unit/nfp/configurator/agents/test_firewall.py index b779de1b3d..7987328188 100644 --- a/gbpservice/neutron/tests/unit/nfp/configurator/agents/test_firewall.py +++ b/gbpservice/contrib/tests/unit/nfp/configurator/agents/test_firewall.py @@ -11,27 +11,22 @@ # under the License. import mock -import unittest +from neutron.tests import base from oslo_config import cfg -from oslo_log import log as logging -from gbpservice.neutron.tests.unit.nfp.configurator.test_data import ( +from gbpservice.contrib.nfp.configurator.agents import firewall as fw +from gbpservice.contrib.nfp.configurator.lib import constants as const +from gbpservice.contrib.nfp.configurator.lib import fw_constants as fw_const +from gbpservice.contrib.tests.unit.nfp.configurator.test_data import ( fw_test_data as fo) -from gbpservice.nfp.configurator.agents import firewall as fw -from gbpservice.nfp.configurator.drivers.firewall.vyos import ( - vyos_fw_driver as fw_dvr) -LOG = logging.getLogger(__name__) -STATUS_ACTIVE = "ACTIVE" +class FWaasRpcManagerTestCase(base.BaseTestCase): + """ Implements test cases for RPC manager methods of firewall agent. -""" Implements test cases for RPC manager methods of firewall agent. 
+ """ -""" - - -class FWaasRpcManagerTestCase(unittest.TestCase): def __init__(self, *args, **kwargs): super(FWaasRpcManagerTestCase, self).__init__(*args, **kwargs) self.fo = fo.FakeObjects() @@ -65,8 +60,8 @@ def _test_event_creation(self, method): 'firewall': self.fo.firewall, 'host': self.fo.host} with mock.patch.object(sc, 'new_event', return_value='foo') as ( - mock_sc_event), \ - mock.patch.object(sc, 'post_event') as mock_sc_rpc_event: + mock_sc_event), ( + mock.patch.object(sc, 'post_event')) as mock_sc_rpc_event: call_method = getattr(agent, method.lower()) call_method(context, self.fo.firewall, self.fo.host) @@ -82,7 +77,7 @@ def test_create_firewall_fwaasrpcmanager(self): """ - self._test_event_creation('CREATE_FIREWALL') + self._test_event_creation(fw_const.FIREWALL_CREATE_EVENT) def test_update_firewall_fwaasrpcmanager(self): """ Implements test case for update firewall method @@ -92,7 +87,7 @@ def test_update_firewall_fwaasrpcmanager(self): """ - self._test_event_creation('UPDATE_FIREWALL') + self._test_event_creation(fw_const.FIREWALL_UPDATE_EVENT) def test_delete_firewall_fwaasrpcmanager(self): """ Implements test case for delete firewall method @@ -102,15 +97,15 @@ def test_delete_firewall_fwaasrpcmanager(self): """ - self._test_event_creation('DELETE_FIREWALL') + self._test_event_creation(fw_const.FIREWALL_DELETE_EVENT) -""" Implements test cases for event handler methods -of firewall agent. -""" +class FwaasHandlerTestCase(base.BaseTestCase): + """ Implements test cases for event handler methods + of firewall agent. 
+ """ -class FwaasHandlerTestCase(unittest.TestCase): def __init__(self, *args, **kwargs): super(FwaasHandlerTestCase, self).__init__(*args, **kwargs) self.fo = fo.FakeObjects() @@ -148,23 +143,24 @@ def _test_handle_event(self, rule_list_info=True): """ agent = self._get_FwHandler_objects() - with mock.patch.object(cfg, 'CONF') as mock_cfg: - mock_cfg.configure_mock(rest_timeout='30', host='foo') - driver = fw_dvr.FwaasDriver(mock_cfg) + driver = mock.Mock() with mock.patch.object( agent.plugin_rpc, 'set_firewall_status') as ( - mock_set_fw_status), \ + mock_set_fw_status), ( mock.patch.object( - agent.plugin_rpc, 'firewall_deleted') as (mock_fw_deleted), \ + agent.plugin_rpc, 'firewall_deleted')) as (mock_fw_deleted), ( mock.patch.object( - driver, 'create_firewall') as mock_create_fw, \ + driver, fw_const.FIREWALL_CREATE_EVENT.lower())) as ( + mock_create_fw), ( mock.patch.object( - driver, 'update_firewall') as mock_update_fw, \ + driver, fw_const.FIREWALL_UPDATE_EVENT.lower())) as ( + mock_update_fw), ( mock.patch.object( - driver, 'delete_firewall') as mock_delete_fw, \ + driver, fw_const.FIREWALL_DELETE_EVENT.lower())) as ( + mock_delete_fw), ( mock.patch.object( - agent, '_get_driver', return_value=driver): + agent, '_get_driver', return_value=driver)): firewall = self.fo._fake_firewall_obj() if not rule_list_info: @@ -182,27 +178,27 @@ def _test_handle_event(self, rule_list_info=True): if 'service_info' in self.fo.context: self.fo.context.pop('service_info') if not rule_list_info: - if self.ev.id == 'CREATE_FIREWALL': + if self.ev.id == fw_const.FIREWALL_CREATE_EVENT: mock_set_fw_status.assert_called_with( agent_info, - firewall['id'], STATUS_ACTIVE, firewall) - elif self.ev.id == 'UPDATE_FIREWALL': + firewall['id'], const.STATUS_ACTIVE, firewall) + elif self.ev.id == fw_const.FIREWALL_UPDATE_EVENT: mock_set_fw_status.assert_called_with( agent_info, - STATUS_ACTIVE, firewall) - elif self.ev.id == 'DELETE_FIREWALL': + const.STATUS_ACTIVE, firewall) + elif 
self.ev.id == fw_const.FIREWALL_DELETE_EVENT: mock_fw_deleted.assert_called_with( agent_info, firewall['id'], firewall) else: - if self.ev.id == 'CREATE_FIREWALL': + if self.ev.id == fw_const.FIREWALL_CREATE_EVENT: mock_create_fw.assert_called_with( context, firewall, self.fo.host) - elif self.ev.id == 'UPDATE_FIREWALL': + elif self.ev.id == fw_const.FIREWALL_UPDATE_EVENT: mock_update_fw.assert_called_with( context, firewall, self.fo.host) - elif self.ev.id == 'DELETE_FIREWALL': + elif self.ev.id == fw_const.FIREWALL_DELETE_EVENT: mock_delete_fw.assert_called_with( context, firewall, self.fo.host) @@ -215,7 +211,7 @@ def test_create_firewall_with_rule_list_info_true(self): """ - self.ev.id = 'CREATE_FIREWALL' + self.ev.id = fw_const.FIREWALL_CREATE_EVENT self._test_handle_event() def test_update_firewall_with_rule_list_info_true(self): @@ -226,7 +222,7 @@ def test_update_firewall_with_rule_list_info_true(self): """ - self.ev.id = 'UPDATE_FIREWALL' + self.ev.id = fw_const.FIREWALL_UPDATE_EVENT self._test_handle_event() def test_delete_firewall_with_rule_list_info_true(self): @@ -237,7 +233,7 @@ def test_delete_firewall_with_rule_list_info_true(self): """ - self.ev.id = 'DELETE_FIREWALL' + self.ev.id = fw_const.FIREWALL_DELETE_EVENT self._test_handle_event() def test_create_firewall_with_rule_list_info_false(self): @@ -248,7 +244,7 @@ def test_create_firewall_with_rule_list_info_false(self): """ - self.ev.id = 'CREATE_FIREWALL' + self.ev.id = fw_const.FIREWALL_CREATE_EVENT self._test_handle_event(False) def test_update_firewall_with_rule_list_info_false(self): @@ -259,7 +255,7 @@ def test_update_firewall_with_rule_list_info_false(self): """ - self.ev.id = 'UPDATE_FIREWALL' + self.ev.id = fw_const.FIREWALL_UPDATE_EVENT self._test_handle_event(False) def test_delete_firewall_with_rule_list_info_false(self): @@ -270,9 +266,5 @@ def test_delete_firewall_with_rule_list_info_false(self): """ - self.ev.id = 'DELETE_FIREWALL' + self.ev.id = fw_const.FIREWALL_DELETE_EVENT 
self._test_handle_event(False) - - -if __name__ == '__main__': - unittest.main() diff --git a/gbpservice/neutron/tests/unit/nfp/configurator/agents/test_generic_config.py b/gbpservice/contrib/tests/unit/nfp/configurator/agents/test_generic_config.py similarity index 72% rename from gbpservice/neutron/tests/unit/nfp/configurator/agents/test_generic_config.py rename to gbpservice/contrib/tests/unit/nfp/configurator/agents/test_generic_config.py index 34d5935f3f..a09a8ebfd9 100644 --- a/gbpservice/neutron/tests/unit/nfp/configurator/agents/test_generic_config.py +++ b/gbpservice/contrib/tests/unit/nfp/configurator/agents/test_generic_config.py @@ -12,30 +12,21 @@ import mock import subprocess -import unittest -from oslo_config import cfg -from oslo_log import log as logging +from neutron.tests import base -from gbpservice.neutron.tests.unit.nfp.configurator.test_data import ( +from gbpservice.contrib.nfp.configurator.agents import generic_config as gc +from gbpservice.contrib.nfp.configurator.lib import ( + generic_config_constants as const) +from gbpservice.contrib.nfp.configurator.lib import constants as common_const +from gbpservice.contrib.tests.unit.nfp.configurator.test_data import ( fw_test_data as fo) -from gbpservice.nfp.configurator.agents import generic_config as gc -from gbpservice.nfp.configurator.drivers.firewall.vyos import ( - vyos_fw_driver as fw_dvr) -from gbpservice.nfp.configurator.lib import ( - generic_config_constants as gen_cfg_const) -LOG = logging.getLogger(__name__) -STATUS_ACTIVE = "ACTIVE" +class GenericConfigRpcManagerTestCase(base.BaseTestCase): + """ Implement test cases for RPC manager methods of generic config agent. -""" Implement test cases for RPC manager methods of generic config agent. 
- -""" - - -class GenericConfigRpcManagerTestCase(unittest.TestCase): - ''' Generic Config RPC receiver for Firewall module ''' + """ def __init__(self, *args, **kwargs): super(GenericConfigRpcManagerTestCase, self).__init__( @@ -73,8 +64,8 @@ def _test_event_creation(self, method): arg_dict = {'context': self.fo.context, 'resource_data': self.fo.kwargs} with mock.patch.object( - sc, 'new_event', return_value='foo') as mock_sc_event, \ - mock.patch.object(sc, 'post_event') as mock_sc_rpc_event: + sc, 'new_event', return_value='foo') as mock_sc_event, ( + mock.patch.object(sc, 'post_event')) as mock_sc_rpc_event: call_method = getattr(agent, method.lower()) call_method(self.fo.context, self.fo.kwargs) @@ -96,7 +87,7 @@ def test_configure_interfaces_genericconfigrpcmanager(self): """ - self._test_event_creation('CONFIGURE_INTERFACES') + self._test_event_creation(const.EVENT_CONFIGURE_INTERFACES) def test_clear_interfaces_genericconfigrpcmanager(self): """ Implements test case for clear interfaces method @@ -106,7 +97,7 @@ def test_clear_interfaces_genericconfigrpcmanager(self): """ - self._test_event_creation('CLEAR_INTERFACES') + self._test_event_creation(const.EVENT_CLEAR_INTERFACES) def test_configure_routes_genericconfigrpcmanager(self): """ Implements test case for configure routes method @@ -116,7 +107,7 @@ def test_configure_routes_genericconfigrpcmanager(self): """ - self._test_event_creation('CONFIGURE_ROUTES') + self._test_event_creation(const.EVENT_CONFIGURE_ROUTES) def test_clear_routes_genericconfigrpcmanager(self): """ Implements test case for clear routes method @@ -126,7 +117,7 @@ def test_clear_routes_genericconfigrpcmanager(self): """ - self._test_event_creation('CLEAR_ROUTES') + self._test_event_creation(const.EVENT_CLEAR_ROUTES) def test_configure_hm_genericconfigrpcmanager(self): """ Implements test case for configure healthmonitor method @@ -136,7 +127,7 @@ def test_configure_hm_genericconfigrpcmanager(self): """ - 
self._test_event_creation('CONFIGURE_HEALTHMONITOR') + self._test_event_creation(const.EVENT_CONFIGURE_HEALTHMONITOR) def test_clear_hm_genericconfigrpcmanager(self): """ Implements test case for clear healthmonitor method @@ -146,15 +137,15 @@ def test_clear_hm_genericconfigrpcmanager(self): """ - self._test_event_creation('CLEAR_HEALTHMONITOR') + self._test_event_creation(const.EVENT_CLEAR_HEALTHMONITOR) -""" Implements test cases for event handler methods -of generic config agent. -""" +class GenericConfigEventHandlerTestCase(base.BaseTestCase): + """ Implements test cases for event handler methods + of generic config agent. + """ -class GenericConfigEventHandlerTestCase(unittest.TestCase): def __init__(self, *args, **kwargs): super(GenericConfigEventHandlerTestCase, self).__init__( *args, **kwargs) @@ -192,51 +183,60 @@ def _test_handle_event(self, ev): """ agent, sc = self._get_GenericConfigEventHandler_object() - with mock.patch.object(cfg, 'CONF') as mock_cfg: - mock_cfg.configure_mock(rest_timeout='30', host='foo') - driver = fw_dvr.FwaasDriver(mock_cfg) + driver = mock.Mock() with mock.patch.object( - driver, 'configure_interfaces') as mock_config_inte, \ + driver, const.EVENT_CONFIGURE_INTERFACES.lower(), + return_value=common_const.SUCCESS) as (mock_config_inte), ( mock.patch.object( - driver, 'clear_interfaces') as mock_clear_inte, \ + driver, const.EVENT_CLEAR_INTERFACES.lower(), + return_value=common_const.SUCCESS)) as (mock_clear_inte), ( mock.patch.object( - driver, 'configure_routes') as mock_config_src_routes, \ + driver, const.EVENT_CONFIGURE_ROUTES.lower(), + return_value=common_const.SUCCESS)) as ( + mock_config_src_routes), ( mock.patch.object( - driver, 'clear_routes') as mock_delete_src_routes, \ + driver, const.EVENT_CLEAR_ROUTES.lower(), + return_value=common_const.SUCCESS)) as ( + mock_delete_src_routes), ( mock.patch.object( - sc, 'poll_event') as mock_hm_poll_event, \ + sc, 'poll_event')) as mock_hm_poll_event, ( mock.patch.object( - 
driver, 'configure_healthmonitor', return_value='SUCCESS'), \ + driver, const.EVENT_CONFIGURE_HEALTHMONITOR.lower(), + return_value=common_const.SUCCESS)), ( mock.patch.object( - agent, '_get_driver', return_value=driver): + driver, const.EVENT_CLEAR_HEALTHMONITOR.lower(), + return_value=common_const.SUCCESS)) as mock_clear_hm, ( + mock.patch.object( + agent, '_get_driver', return_value=driver)): - if 'CONFIGURE_HEALTHMONITOR' in ev.id: + if const.EVENT_CONFIGURE_HEALTHMONITOR in ev.id: ev.id, periodicity = ev.id.split() agent.handle_event(ev) resource_data = self.fo._fake_resource_data() - if ev.id == 'CONFIGURE_INTERFACES': + if ev.id == const.EVENT_CONFIGURE_INTERFACES: mock_config_inte.assert_called_with( self.fo.context, resource_data) - elif ev.id == 'CLEAR_INTERFACES': + elif ev.id == const.EVENT_CLEAR_INTERFACES: mock_clear_inte.assert_called_with( self.fo.context, resource_data) - elif ev.id == 'CONFIGURE_ROUTES': + elif ev.id == const.EVENT_CONFIGURE_ROUTES: mock_config_src_routes.assert_called_with( self.fo.context, resource_data) - elif ev.id == 'CLEAR_ROUTES': + elif ev.id == const.EVENT_CLEAR_ROUTES: mock_delete_src_routes.assert_called_with( self.fo.context, resource_data) - elif 'CONFIGURE_HEALTHMONITOR' in ev.id: - if periodicity == gen_cfg_const.INITIAL_HM_RETRIES: + elif const.EVENT_CONFIGURE_HEALTHMONITOR in ev.id: + if periodicity == const.INITIAL_HM_RETRIES: mock_hm_poll_event.assert_called_with( - ev, max_times=gen_cfg_const.INITIAL_HM_RETRIES) - elif periodicity == gen_cfg_const.FOREVER: + ev, max_times=const.INITIAL_HM_RETRIES) + elif periodicity == const.FOREVER: mock_hm_poll_event.assert_called_with(ev) - elif ev.id == 'CLEAR_HEALTHMONITOR': - pass + elif ev.id == const.EVENT_CLEAR_HEALTHMONITOR: + mock_clear_hm.assert_called_with( + self.fo.context, resource_data) def _test_handle_periodic_event(self, ev): """ Test handle periodic event method of generic config agent @@ -250,15 +250,14 @@ def _test_handle_periodic_event(self, ev): """ 
agent, sc = self._get_GenericConfigEventHandler_object() - with mock.patch.object(cfg, 'CONF') as mock_cfg: - mock_cfg.configure_mock(rest_timeout='30', host='foo') - driver = fw_dvr.FwaasDriver(mock_cfg) + driver = mock.Mock() + with mock.patch.object( - agent, '_get_driver', return_value=driver), \ - mock.patch.object( - driver, 'configure_healthmonitor', - return_value='SUCCESS'), \ - mock.patch.object(subprocess, 'check_output', return_value=True): + agent, '_get_driver', return_value=driver), ( + mock.patch.object( + driver, const.EVENT_CONFIGURE_HEALTHMONITOR.lower(), + return_value=common_const.SUCCESS)), ( + mock.patch.object(subprocess, 'check_output', return_value=True)): agent.handle_configure_healthmonitor(ev) @@ -271,7 +270,7 @@ def test_configure_interfaces_genericconfigeventhandler(self): """ ev = fo.FakeEventGenericConfig() - ev.id = 'CONFIGURE_INTERFACES' + ev.id = const.EVENT_CONFIGURE_INTERFACES self._test_handle_event(ev) def test_clear_interfaces_genericconfigeventhandler(self): @@ -283,7 +282,7 @@ def test_clear_interfaces_genericconfigeventhandler(self): """ ev = fo.FakeEventGenericConfig() - ev.id = 'CLEAR_INTERFACES' + ev.id = const.EVENT_CLEAR_INTERFACES self._test_handle_event(ev) def test_configure_routes_genericconfigeventhandler(self): @@ -295,7 +294,7 @@ def test_configure_routes_genericconfigeventhandler(self): """ ev = fo.FakeEventGenericConfig() - ev.id = 'CONFIGURE_ROUTES' + ev.id = const.EVENT_CONFIGURE_ROUTES self._test_handle_event(ev) def test_clear_routes_genericconfigeventhandler(self): @@ -307,7 +306,7 @@ def test_clear_routes_genericconfigeventhandler(self): """ ev = fo.FakeEventGenericConfig() - ev.id = 'CLEAR_ROUTES' + ev.id = const.EVENT_CLEAR_ROUTES self._test_handle_event(ev) def test_configure_hm_initial_genericconfigeventhandler(self): @@ -331,7 +330,7 @@ def test_configure_hm_forever_genericconfigeventhandler(self): """ ev = fo.FakeEventGenericConfig() - ev.data['resource_data'].update({'periodicity': 
gen_cfg_const.FOREVER}) + ev.data['resource_data'].update({'periodicity': const.FOREVER}) ev.id = 'CONFIGURE_HEALTHMONITOR forever' self._test_handle_event(ev) @@ -340,7 +339,6 @@ def test_clear_hm_genericconfigeventhandler(self): of generic config event handler. Returns: none - """ ev = fo.FakeEventGenericConfig() @@ -356,9 +354,5 @@ def test_handle_configure_healthmonitor_genericconfigeventhandler(self): """ ev = fo.FakeEventGenericConfig() - ev.id = 'CONFIGURE_HEALTHMONITOR' + ev.id = const.EVENT_CONFIGURE_HEALTHMONITOR self._test_handle_periodic_event(ev) - - -if __name__ == '__main__': - unittest.main() diff --git a/gbpservice/neutron/tests/unit/nfp/configurator/agents/test_lb_agent.py b/gbpservice/contrib/tests/unit/nfp/configurator/agents/test_lb_agent.py similarity index 87% rename from gbpservice/neutron/tests/unit/nfp/configurator/agents/test_lb_agent.py rename to gbpservice/contrib/tests/unit/nfp/configurator/agents/test_lb_agent.py index 0868c614c4..d4fa44da7e 100644 --- a/gbpservice/neutron/tests/unit/nfp/configurator/agents/test_lb_agent.py +++ b/gbpservice/contrib/tests/unit/nfp/configurator/agents/test_lb_agent.py @@ -11,24 +11,20 @@ # under the License. 
import mock -import unittest -from gbpservice.neutron.tests.unit.nfp.configurator.test_data import ( +from gbpservice.contrib.nfp.configurator.agents import loadbalancer_v1 as lb +from gbpservice.contrib.nfp.configurator.lib import constants as const +from gbpservice.contrib.nfp.configurator.lib import demuxer +from gbpservice.contrib.nfp.configurator.modules import configurator +from gbpservice.contrib.tests.unit.nfp.configurator.test_data import ( lb_test_data as test_data) -from gbpservice.nfp.configurator.agents import loadbalancer_v1 as lb -from gbpservice.nfp.configurator.drivers.loadbalancer.v1.haproxy import ( - haproxy_lb_driver as lb_driver) -from gbpservice.nfp.configurator.lib import constants as const -from gbpservice.nfp.configurator.lib import demuxer -from gbpservice.nfp.configurator.modules import configurator +from neutron.tests import base -"""Implement test cases for LBaasRpcSender methods of loadbalancer agent. - -""" - - -class LBaasRpcSenderTest(unittest.TestCase): +class LBaasRpcSenderTest(base.BaseTestCase): + """Implements test cases for LBaasRpcSender class methods of + loadbalancer agent. 
+ """ @mock.patch(__name__ + '.test_data.FakeObjects.conf') @mock.patch(__name__ + '.test_data.FakeObjects.sc') @@ -59,9 +55,11 @@ def test_update_status(self): sc, conf, rpc_mgr = self._get_configurator_rpc_manager_object() agent = lb.LBaasRpcSender(sc) agent_info = {'context': 'context', 'resource': 'pool'} - with mock.patch.object(sc, 'new_event', return_value='foo') as ( - mock_new_event),\ - mock.patch.object(sc, 'stash_event') as mock_stash_event: + with mock.patch.object( + sc, 'new_event', return_value='foo') as mock_new_event, ( + mock.patch.object( + sc, 'stash_event')) as mock_stash_event: + agent.update_status('pool', 'object_id', 'status', agent_info, 'pool') @@ -92,9 +90,10 @@ def test_update_pool_stats(self): sc, conf, rpc_mgr = self._get_configurator_rpc_manager_object() agent = lb.LBaasRpcSender(sc) - with mock.patch.object(sc, 'new_event', return_value='foo') as ( - mock_new_event), \ - mock.patch.object(sc, 'stash_event') as (mock_stash_event): + with mock.patch.object( + sc, 'new_event', return_value='foo') as mock_new_event, ( + mock.patch.object( + sc, 'stash_event')) as mock_stash_event: context = test_data.Context() agent.update_pool_stats('pool_id', 'stats', context) @@ -127,12 +126,11 @@ def test_get_logical_device(self): '6350c0fd-07f8-46ff-b797-62acd23760de', test_data.FakeObjects()._get_context_logical_device()) -"""Implement test cases for RPC manager methods of loadbalancer agent. - -""" - -class LBaaSRpcManagerTest(unittest.TestCase): +class LBaaSRpcManagerTest(base.BaseTestCase): + """Implements test cases for LBaaSRpcManager class methods of + loadbalancer agent. 
+ """ def __init__(self, *args, **kwargs): super(LBaaSRpcManagerTest, self).__init__(*args, **kwargs) @@ -224,12 +222,12 @@ def _test_rpc_manager(self, operation, request_data, args): agent, sc = self._get_lbaas_rpc_manager_object(conf, sc) method = self.fo.method - with mock.patch.object(sc, 'new_event', return_value=self.foo) as ( - mock_sc_new_event), \ - mock.patch.object(sc, 'post_event') as mock_sc_post_event, \ - mock.patch.object(rpc_mgr, - '_get_service_agent_instance', - return_value=agent): + with mock.patch.object( + sc, 'new_event', return_value=self.foo) as mock_sc_new_event, ( + mock.patch.object( + sc, 'post_event')) as mock_sc_post_event, ( + mock.patch.object( + rpc_mgr, '_get_service_agent_instance', return_value=agent)): getattr(rpc_mgr, method[operation])(self.fo.context, request_data) mock_sc_new_event.assert_called_with(id=operation, data=args) @@ -388,18 +386,17 @@ def test_UPDATE_POOL_HEALTH_MONITOR_rpc_manager(self): self.fo.get_request_data_for_update_pool_hm(), self.arg_dict_health_monitor_update) -"""Implement test cases for methods of EventHandler of loadbalancer agent. - -""" - -class LBaasEventHandlerTestCase(unittest.TestCase): +class LBaasEventHandlerTestCase(base.BaseTestCase): + """Implement test cases for LBaaSEventHandler class methods of + loadbalancer agent. + """ def __init__(self, *args, **kwargs): super(LBaasEventHandlerTestCase, self).__init__(*args, **kwargs) self.fo = test_data.FakeObjects() self.ev = test_data.FakeEvent() - self.drivers = {'loadbalancer': lb_driver.HaproxyOnVmDriver()} + self.drivers = {'loadbalancer': mock.Mock()} def _get_lb_handler_objects(self, sc, drivers, rpcmgr): """ Retrieves EventHandler object of loadbalancer agent. 
@@ -407,7 +404,6 @@ def _get_lb_handler_objects(self, sc, drivers, rpcmgr): :param sc: mocked service controller object of process model framework :param drivers: mocked drivers object of loadbalancer object :param rpcmgr: mocked RPC manager object loadbalancer object - :param nqueue: mocked nqueue object of process model framework Returns: objects of LBaaSEventHandler of loadbalancer agent @@ -427,34 +423,34 @@ def _test_handle_event(self, sc, rpcmgr): """ agent = self._get_lb_handler_objects(sc, self.drivers, rpcmgr) - driver = lb_driver.HaproxyOnVmDriver() + driver = self.drivers['loadbalancer'] - with mock.patch.object(agent, '_get_driver', return_value=driver), \ + with mock.patch.object( + agent, '_get_driver', return_value=driver), ( mock.patch.object( - driver, 'create_vip') as mock_create_vip,\ + driver, 'create_vip')) as mock_create_vip, ( mock.patch.object( - driver, 'delete_vip') as mock_delete_vip,\ + driver, 'delete_vip')) as mock_delete_vip, ( mock.patch.object( - driver, 'update_vip') as mock_update_vip,\ + driver, 'update_vip')) as mock_update_vip, ( mock.patch.object( - self.drivers['loadbalancer'], 'create_pool') as ( - mock_create_pool),\ + driver, 'create_pool')) as mock_create_pool, ( mock.patch.object( - driver, 'delete_pool') as mock_delete_pool,\ + driver, 'delete_pool')) as mock_delete_pool, ( mock.patch.object( - driver, 'update_pool') as mock_update_pool,\ + driver, 'update_pool')) as mock_update_pool, ( mock.patch.object( - driver, 'create_member') as mock_create_member,\ + driver, 'create_member')) as mock_create_member, ( mock.patch.object( - driver, 'delete_member') as mock_delete_member,\ + driver, 'delete_member')) as mock_delete_member, ( mock.patch.object( - driver, 'update_member') as mock_update_member,\ + driver, 'update_member')) as mock_update_member, ( mock.patch.object( - driver, 'create_pool_health_monitor') as mock_create_poolhm,\ + driver, 'create_pool_health_monitor')) as mock_create_poolhm, ( mock.patch.object( - 
driver, 'delete_pool_health_monitor') as mock_delete_poolhm,\ + driver, 'delete_pool_health_monitor')) as mock_delete_poolhm, ( mock.patch.object( - driver, 'update_pool_health_monitor') as mock_update_poolhm: + driver, 'update_pool_health_monitor')) as mock_update_poolhm: vip = self.fo._get_vip_object()[0] old_vip = self.fo._get_vip_object()[0] @@ -632,7 +628,3 @@ def test_update_pool_hm_event_handler(self): self.ev.id = 'UPDATE_POOL_HEALTH_MONITOR' self._test_handle_event() - - -if __name__ == '__main__': - unittest.main() diff --git a/gbpservice/contrib/tests/unit/nfp/configurator/agents/test_nfp_service.py b/gbpservice/contrib/tests/unit/nfp/configurator/agents/test_nfp_service.py new file mode 100644 index 0000000000..403b216a68 --- /dev/null +++ b/gbpservice/contrib/tests/unit/nfp/configurator/agents/test_nfp_service.py @@ -0,0 +1,153 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock + +from neutron.tests import base + +from gbpservice.contrib.nfp.configurator.agents import nfp_service as ns +from gbpservice.contrib.nfp.configurator.lib import ( + nfp_service_constants as const) +from gbpservice.contrib.tests.unit.nfp.configurator.test_data import ( + nfp_service_test_data as fo) + + +class NfpServiceRpcManagerTestCase(base.BaseTestCase): + """ Implement test cases for RPC manager methods of nfp service agent. 
+ + """ + + def __init__(self, *args, **kwargs): + super(NfpServiceRpcManagerTestCase, self).__init__( + *args, **kwargs) + self.fo = fo.FakeObjects() + + @mock.patch(__name__ + '.fo.FakeObjects.sc') + @mock.patch(__name__ + '.fo.FakeObjects.conf') + def _get_NfpServiceRpcManager_object(self, conf, sc): + """ Retrieves RPC manager object of nfp service agent. + + :param sc: mocked service controller object of process model framework + :param conf: mocked OSLO configuration file + + Returns: object of nfp service's RPC manager + and service controller. + """ + + agent = ns.ConfigScriptRpcManager(sc, conf) + return agent, sc + + def _test_event_creation(self, method): + """ Tests event creation and enqueueing for create/delete + operation of generic config agent's RPC manager. + + :param method: CREATE_NFP_SERVICE + + Returns: none + """ + + agent, sc = self._get_NfpServiceRpcManager_object() + arg_dict = {'context': self.fo.context, + 'resource_data': self.fo.kwargs} + with mock.patch.object( + sc, 'new_event', return_value='foo') as mock_sc_event, ( + mock.patch.object(sc, 'post_event')) as mock_sc_rpc_event: + actual_call = agent.run_nfp_service(self.fo.context, + self.fo.kwargs) + + expected_cal = mock_sc_event.assert_called_with( + id=method, data=arg_dict, key=None) + self.assertEqual(actual_call, expected_cal) + mock_sc_rpc_event.assert_called_with('foo') + + def test_nfp_service_rpcmanager(self): + """ Implements test case for run_nfp_service method + of nfp service RPC manager. + + Returns: none + """ + + self._test_event_creation(const.CREATE_NFP_SERVICE_EVENT) + + +class NfpServiceEventHandlerTestCase(base.BaseTestCase): + """ Implements test cases for event handler methods + of nfp service agent. 
+ + """ + + def __init__(self, *args, **kwargs): + super(NfpServiceEventHandlerTestCase, self).__init__( + *args, **kwargs) + self.fo = fo.FakeObjects() + self.context = {'notification_data': {}, + 'resource': 'interfaces'} + + @mock.patch(__name__ + '.fo.FakeObjects.rpcmgr') + @mock.patch(__name__ + '.fo.FakeObjects.drivers') + @mock.patch(__name__ + '.fo.FakeObjects.sc') + def _get_nfp_service_event_handler_object(self, sc, drivers, rpcmgr): + """ Retrieves event handler object of nfp service. + + :param sc: mocked service controller object of process model framework + :param rpcmgr: object of configurator's RPC manager + :param drivers: list of driver objects for nfp service agent + + Returns: object of nfp service's event handler + """ + + agent = ns.ConfigScriptEventHandler(sc, drivers, rpcmgr) + return agent, sc + + def _test_handle_event(self, ev, result=const.UNHANDLED_RESULT): + """ Test handle event method of nfp service agent. + + :param ev: event data which has to be actually sent by + process framework. + + Returns: None + """ + + agent, sc = self._get_nfp_service_event_handler_object() + driver = mock.Mock() + + with mock.patch.object( + driver, 'run_heat', return_value=result) as mock_config_inte, ( + mock.patch.object( + agent, '_get_driver', return_value=driver)): + + agent.handle_event(ev) + + mock_config_inte.assert_called_with( + ev.data['context']['context'], ev.data['resource_data']) + + def test_create_nfp_service_handle_event_success(self): + """ Implements positive test case for create_nfp_service method + of nfp service event handler. + + Returns: none + """ + + ev = fo.FakeEventNfpService() + ev.id = const.CREATE_NFP_SERVICE_EVENT + self._test_handle_event(ev) + + def test_create_nfp_service_handle_event_failure(self): + """ Implements negative test case for create_nfp_service method + of nfp service event handler. 
+ + Returns: none + """ + + ev = fo.FakeEventNfpService() + ev.id = const.CREATE_NFP_SERVICE_EVENT + self._test_handle_event(ev, const.FAILURE) diff --git a/gbpservice/contrib/tests/unit/nfp/configurator/agents/test_vpn_agent.py b/gbpservice/contrib/tests/unit/nfp/configurator/agents/test_vpn_agent.py new file mode 100644 index 0000000000..67012ead1a --- /dev/null +++ b/gbpservice/contrib/tests/unit/nfp/configurator/agents/test_vpn_agent.py @@ -0,0 +1,93 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +import mock + +from gbpservice.contrib.nfp.configurator.agents import vpn +from gbpservice.contrib.nfp.configurator.lib import vpn_constants as const +from gbpservice.contrib.tests.unit.nfp.configurator.test_data import ( + vpn_test_data) + +from neutron.tests import base + + +class VPNaasRpcManagerTestCase(base.BaseTestCase): + """ + Implements test cases for RPC manager methods of vpn agent + """ + def __init__(self, *args, **kwargs): + super(VPNaasRpcManagerTestCase, self).__init__(*args, **kwargs) + self.test_dict = vpn_test_data.VPNTestData() + self.conf = self.test_dict.conf + self.sc = mock.Mock() + self.rpcmgr = vpn.VPNaasRpcManager(self.conf, self.sc) + + def test_vpnservice_updated(self): + resource_data = self.test_dict._create_ipsec_site_conn_obj() + with mock.patch.object(self.sc, 'new_event', + return_value='foo'), ( + mock.patch.object(self.sc, 'post_event')) as mock_post_event: + self.rpcmgr.vpnservice_updated( + self.test_dict.make_service_context(), + resource_data=resource_data) + mock_post_event.assert_called_with('foo') + + +class VPNaasEventHandlerTestCase(base.BaseTestCase): + """ + Implements test cases for RPC manager methods of vpn agent + """ + def __init__(self, *args, **kwargs): + super(VPNaasEventHandlerTestCase, self).__init__(*args, **kwargs) + self.test_dict = vpn_test_data.VPNTestData() + self.sc = self.test_dict.sc + self.conf = self.test_dict.conf + self.handler = vpn.VPNaasEventHandler(self.test_dict.sc, + self.test_dict.drivers) + self.ev = vpn_test_data.FakeEvent() + self.rpc_sender = vpn.VpnaasRpcSender(self.sc) + self.driver = mock.Mock() + + def test_handle_event(self): + """ + Test to handle the vpn agent's vpnservice_updated method to + handle various vpn operations + + """ + with mock.patch.object(self.handler, + '_get_driver', + return_value=self.test_dict.drivers), ( + mock.patch.object( + self.driver, + 'vpnservice_updated')) as mock_vpnservice_updated: + self.handler._vpnservice_updated(self.ev, self.driver) + 
mock_vpnservice_updated.assert_called_with(self.ev.data['context'], + self.ev.data[ + 'resource_data']) + + def test_sync(self): + """ + Test to handle the vpn service status like ACTIVE, ERROR + after the configurations. + + """ + with mock.patch.object(self.handler, + '_get_driver', + return_value=self.driver), ( + mock.patch.object(self.rpc_sender, + 'get_vpn_servicecontext')), ( + mock.patch.object(self.driver, + 'check_status', + return_value=const.STATE_ACTIVE)): + + self.assertEqual(self.handler.sync(self.ev), {'poll': False}) diff --git a/gbpservice/nfp/configurator/drivers/nfp_service/__init__.py b/gbpservice/contrib/tests/unit/nfp/configurator/drivers/__init__.py similarity index 100% rename from gbpservice/nfp/configurator/drivers/nfp_service/__init__.py rename to gbpservice/contrib/tests/unit/nfp/configurator/drivers/__init__.py diff --git a/gbpservice/nfp/configurator/drivers/nfp_service/heat/__init__.py b/gbpservice/contrib/tests/unit/nfp/configurator/drivers/firewall/__init__.py similarity index 100% rename from gbpservice/nfp/configurator/drivers/nfp_service/heat/__init__.py rename to gbpservice/contrib/tests/unit/nfp/configurator/drivers/firewall/__init__.py diff --git a/gbpservice/neutron/tests/unit/nfp/configurator/drivers/firewall/test_asav_fw_driver.py b/gbpservice/contrib/tests/unit/nfp/configurator/drivers/firewall/test_asav_fw_driver.py similarity index 98% rename from gbpservice/neutron/tests/unit/nfp/configurator/drivers/firewall/test_asav_fw_driver.py rename to gbpservice/contrib/tests/unit/nfp/configurator/drivers/firewall/test_asav_fw_driver.py index acd3dc9be1..7ea2c3e90d 100644 --- a/gbpservice/neutron/tests/unit/nfp/configurator/drivers/firewall/test_asav_fw_driver.py +++ b/gbpservice/contrib/tests/unit/nfp/configurator/drivers/firewall/test_asav_fw_driver.py @@ -17,9 +17,9 @@ from oslo_config import cfg from oslo_serialization import jsonutils -from gbpservice.neutron.tests.unit.nfp.configurator.test_data import ( +from 
gbpservice.contrib.tests.unit.nfp.configurator.test_data import ( asav_fw_test_data as fo) -from gbpservice.nfp.configurator.drivers.firewall.asav import ( +from gbpservice.contrib.nfp.configurator.drivers.firewall.asav import ( asav_fw_driver as fw_dvr) diff --git a/gbpservice/neutron/tests/unit/nfp/configurator/drivers/firewall/test_vyos_fw_driver.py b/gbpservice/contrib/tests/unit/nfp/configurator/drivers/firewall/test_vyos_fw_driver.py similarity index 64% rename from gbpservice/neutron/tests/unit/nfp/configurator/drivers/firewall/test_vyos_fw_driver.py rename to gbpservice/contrib/tests/unit/nfp/configurator/drivers/firewall/test_vyos_fw_driver.py index c4554f46fe..187d4110ac 100644 --- a/gbpservice/neutron/tests/unit/nfp/configurator/drivers/firewall/test_vyos_fw_driver.py +++ b/gbpservice/contrib/tests/unit/nfp/configurator/drivers/firewall/test_vyos_fw_driver.py @@ -12,26 +12,23 @@ import mock import requests -import unittest +from neutron.tests import base from oslo_config import cfg from oslo_serialization import jsonutils -from gbpservice.neutron.tests.unit.nfp.configurator.test_data import ( - fw_test_data as fo) -from gbpservice.nfp.configurator.drivers.firewall.vyos import ( +from gbpservice.contrib.nfp.configurator.drivers.firewall.vyos import ( vyos_fw_driver as fw_dvr) +from gbpservice.contrib.nfp.configurator.lib import constants as const +from gbpservice.contrib.tests.unit.nfp.configurator.test_data import ( + fw_test_data as fo) -STATUS_ACTIVE = "ACTIVE" - -""" Implements test cases for driver methods -of generic config. - -""" - +class FwGenericConfigDriverTestCase(base.BaseTestCase): + """ Implements test cases for driver methods + of generic config. 
-class FwGenericConfigDriverTestCase(unittest.TestCase): + """ def __init__(self, *args, **kwargs): super(FwGenericConfigDriverTestCase, self).__init__(*args, **kwargs) @@ -40,9 +37,31 @@ def __init__(self, *args, **kwargs): mock_cfg.configure_mock(rest_timeout=120, host='foo') self.driver = fw_dvr.FwaasDriver(mock_cfg) self.resp = mock.Mock() - self.fake_resp_dict = {'status': True} + self.fake_resp_dict = {'status': True, 'reason': 'not found!'} self.kwargs = self.fo._fake_resource_data() + def test_configure_static_ip(self): + """ Implements test case for configure static ip method + of generic config driver. + + Returns: none + + """ + + with mock.patch.object( + requests, 'post', return_value=self.resp) as mock_post, ( + mock.patch.object( + self.resp, 'json', return_value=self.fake_resp_dict)), ( + mock.patch.object( + self.driver, '_configure_log_forwarding', + return_value=const.STATUS_SUCCESS)): + self.driver.configure_interfaces(self.fo.context, self.kwargs) + + data = jsonutils.dumps(self.fo.static_ip_data()) + mock_post.assert_called_with( + self.fo.get_url_for_api('add_static_ip'), + data=data, timeout=self.fo.timeout) + def test_configure_interfaces(self): """ Implements test case for configure interfaces method of generic config driver. 
@@ -52,13 +71,20 @@ def test_configure_interfaces(self): """ with mock.patch.object( - requests, 'post', return_value=self.resp) as mock_post, \ + requests, 'post', return_value=self.resp) as mock_post, ( mock.patch.object( - self.resp, 'json', return_value=self.fake_resp_dict): + self.resp, 'json', return_value=self.fake_resp_dict)), ( + mock.patch.object( + self.driver, '_configure_log_forwarding', + return_value=const.STATUS_SUCCESS)), ( + mock.patch.object( + self.driver, '_configure_static_ips', + return_value=const.STATUS_SUCCESS)): self.driver.configure_interfaces(self.fo.context, self.kwargs) - mock_post.assert_called_with(self.fo.url_for_add_inte, - self.fo.data_for_interface, + data = jsonutils.dumps(self.fo.data_for_interface) + mock_post.assert_called_with(self.fo.get_url_for_api('add_inte'), + data=data, timeout=self.fo.timeout) def test_clear_interfaces(self): @@ -71,14 +97,15 @@ def test_clear_interfaces(self): self.resp = mock.Mock(status_code=200) with mock.patch.object( - requests, 'delete', return_value=self.resp) as mock_delete, \ + requests, 'delete', return_value=self.resp) as mock_delete, ( mock.patch.object( - self.resp, 'json', return_value=self.fake_resp_dict): + self.resp, 'json', return_value=self.fake_resp_dict)): self.driver.clear_interfaces(self.fo.context, self.kwargs) + data = jsonutils.dumps(self.fo.data_for_interface) mock_delete.assert_called_with( - self.fo.url_for_del_inte, - data=self.fo.data_for_interface, + self.fo.get_url_for_api('del_inte'), + data=data, timeout=self.fo.timeout) def test_configure_source_routes(self): @@ -90,15 +117,18 @@ def test_configure_source_routes(self): """ with mock.patch.object( - requests, 'post', return_value=self.resp) as mock_post, \ + requests, 'post', return_value=self.resp) as mock_post, ( mock.patch.object( - self.resp, 'json', return_value=self.fake_resp_dict): - self.driver.configure_routes( - self.fo.context, self.kwargs) + self.resp, 'json', return_value=self.fake_resp_dict)): - 
mock_post.assert_called_with(self.fo.url_for_add_src_route, - data=self.fo.data_for_add_src_route, - timeout=self.fo.timeout) + self.driver.configure_routes(self.fo.context, self.kwargs) + + data = list() + data.append(self.fo.data_for_add_src_route) + data = jsonutils.dumps(data) + mock_post.assert_called_with( + self.fo.get_url_for_api('add_src_route'), + data=data, timeout=self.fo.timeout) def test_delete_source_routes(self): """ Implements test case for clear routes method @@ -109,24 +139,25 @@ def test_delete_source_routes(self): """ with mock.patch.object( - requests, 'delete', return_value=self.resp) as mock_delete, \ + requests, 'delete', return_value=self.resp) as mock_delete, ( mock.patch.object( - self.resp, 'json', return_value=self.fake_resp_dict): + self.resp, 'json', return_value=self.fake_resp_dict)): self.driver.clear_routes( self.fo.context, self.kwargs) + data = list() + data.append(self.fo.data_for_del_src_route) + data = jsonutils.dumps(data) mock_delete.assert_called_with( - self.fo.url_for_del_src_route, - data=self.fo.data_for_del_src_route, - timeout=self.fo.timeout) + self.fo.get_url_for_api('del_src_route'), + data=data, timeout=self.fo.timeout) -""" Implements test cases for driver methods -of firewall. -""" +class FwaasDriverTestCase(base.BaseTestCase): + """ Implements test cases for driver methods + of firewall. 
- -class FwaasDriverTestCase(unittest.TestCase): + """ def __init__(self, *args, **kwargs): super(FwaasDriverTestCase, self).__init__(*args, **kwargs) @@ -136,6 +167,7 @@ def __init__(self, *args, **kwargs): self.driver = fw_dvr.FwaasDriver(mock_cfg) self.resp = mock.Mock() self.fake_resp_dict = {'status': True, + 'message': 'something', 'config_success': True, 'delete_success': True} self.fo.firewall = self.fo._fake_firewall_obj() @@ -150,29 +182,16 @@ def test_create_firewall_fwaasdriver(self): """ with mock.patch.object( - requests, 'post', return_value=self.resp) as mock_post, \ + requests, 'post', return_value=self.resp) as mock_post, ( mock.patch.object( - self.resp, 'json', return_value=self.fake_resp_dict): + self.resp, 'json', return_value=self.fake_resp_dict)): mock_post.configure_mock(status_code=200) self.driver.create_firewall(self.fo.context, self.fo.firewall, self.fo.host) - mock_post.assert_called_with(self.fo.url_for_config_fw, - self.firewall, + mock_post.assert_called_with(self.fo.get_url_for_api('config_fw'), + data=self.firewall, timeout=self.fo.timeout) - def test_create_firewall_key_error_fwaasdriver(self): - """ Implements test case for catching key error in - create firewall method of firewall's drivers. - - Returns: none - - """ - - self.fo.firewall.pop('description') - with self.assertRaises(KeyError): - self.driver.create_firewall(self.fo.context, - self.fo.firewall, self.fo.host) - def test_update_firewall_fwaasdriver(self): """ Implements test case for update firewall method of firewall's drivers. 
@@ -182,28 +201,15 @@ def test_update_firewall_fwaasdriver(self): """ with mock.patch.object( - requests, 'put', return_value=self.resp) as mock_put, \ + requests, 'put', return_value=self.resp) as mock_put, ( mock.patch.object( - self.resp, 'json', return_value=self.fake_resp_dict): + self.resp, 'json', return_value=self.fake_resp_dict)): self.driver.update_firewall(self.fo.context, self.fo.firewall, self.fo.host) - mock_put.assert_called_with(self.fo.url_for_update_fw, + mock_put.assert_called_with(self.fo.get_url_for_api('update_fw'), data=self.firewall, timeout=self.fo.timeout) - def test_update_firewall_key_error_fwaasdriver(self): - """ Implements test case for catching key error in - update firewall method of firewall's drivers. - - Returns: none - - """ - - self.fo.firewall.pop('description') - with self.assertRaises(KeyError): - self.driver.update_firewall(self.fo.context, - self.fo.firewall, self.fo.host) - def test_delete_firewall_fwaasdriver(self): """ Implements test case for delete firewall method of firewall's drivers. @@ -213,28 +219,11 @@ def test_delete_firewall_fwaasdriver(self): """ with mock.patch.object( - requests, 'delete', return_value=self.resp) as mock_delete, \ + requests, 'delete', return_value=self.resp) as mock_delete, ( mock.patch.object( - self.resp, 'json', return_value=self.fake_resp_dict): - self.driver.delete_firewall(self.fo.context, - self.fo.firewall, self.fo.host) - mock_delete.assert_called_with(self.fo.url_for_delete_fw, - data=self.firewall, - timeout=self.fo.timeout) - - def test_delete_firewall_key_error_fwaasdriver(self): - """ Implements test case for catching key error in - delete firewall method of firewall's drivers. 
- - Returns: none - - """ - - self.fo.firewall.pop('description') - with self.assertRaises(KeyError): + self.resp, 'json', return_value=self.fake_resp_dict)): self.driver.delete_firewall(self.fo.context, self.fo.firewall, self.fo.host) - - -if __name__ == '__main__': - unittest.main() + mock_delete.assert_called_with( + self.fo.get_url_for_api('delete_fw'), + data=self.firewall, timeout=self.fo.timeout) diff --git a/gbpservice/nfp/configurator/drivers/vpn/__init__.py b/gbpservice/contrib/tests/unit/nfp/configurator/drivers/loadbalancer/__init__.py similarity index 100% rename from gbpservice/nfp/configurator/drivers/vpn/__init__.py rename to gbpservice/contrib/tests/unit/nfp/configurator/drivers/loadbalancer/__init__.py diff --git a/gbpservice/neutron/tests/unit/nfp/configurator/drivers/loadbalancer/test_lb_driver.py b/gbpservice/contrib/tests/unit/nfp/configurator/drivers/loadbalancer/test_lb_driver.py similarity index 84% rename from gbpservice/neutron/tests/unit/nfp/configurator/drivers/loadbalancer/test_lb_driver.py rename to gbpservice/contrib/tests/unit/nfp/configurator/drivers/loadbalancer/test_lb_driver.py index 2babe9b638..83b22cac53 100644 --- a/gbpservice/neutron/tests/unit/nfp/configurator/drivers/loadbalancer/test_lb_driver.py +++ b/gbpservice/contrib/tests/unit/nfp/configurator/drivers/loadbalancer/test_lb_driver.py @@ -11,23 +11,20 @@ # under the License. 
import mock -import unittest -from gbpservice.neutron.tests.unit.nfp.configurator.test_data import ( +from gbpservice.contrib.nfp.configurator.agents import loadbalancer_v1 as lb +from gbpservice.contrib.nfp.configurator.drivers.loadbalancer.v1.\ + haproxy import (haproxy_lb_driver as lb_driver) +from gbpservice.contrib.nfp.configurator.drivers.loadbalancer.v1.\ + haproxy import (haproxy_rest_client as _rest_client) +from gbpservice.contrib.tests.unit.nfp.configurator.test_data import ( lb_test_data as test_data) -from gbpservice.nfp.configurator.agents import loadbalancer_v1 as lb -from gbpservice.nfp.configurator.drivers.loadbalancer.v1.haproxy import ( - haproxy_lb_driver as lb_driver) -from gbpservice.nfp.configurator.drivers.loadbalancer.v1.haproxy import ( - haproxy_rest_client as _rest_client) +from neutron.tests import base +from oslo_serialization import jsonutils -""" Implement test cases for loadbalancer driver. - -""" - - -class HaproxyOnVmDriverTestCase(unittest.TestCase): +class HaproxyOnVmDriverTestCase(base.BaseTestCase): + """ Implements test cases for haproxy loadbalancer driver. 
""" def __init__(self, *args, **kwargs): super(HaproxyOnVmDriverTestCase, self).__init__(*args, **kwargs) @@ -63,7 +60,6 @@ def _get_lb_handler_objects(self, sc, drivers, rpcmgr): :param sc: mocked service controller object of process model framework :param drivers: mocked drivers object of loadbalancer object :param rpcmgr: mocked RPC manager object loadbalancer object - :param nqueue: mocked nqueue object of process model framework Returns: objects of LBaaSEventHandler of loadbalancer agent @@ -89,20 +85,16 @@ def _test_lbaasdriver(self, method_name): 'healthmonitors': self.fo.hm, 'members': self.fo.member} with mock.patch.object( - agent.plugin_rpc, - 'get_logical_device', - return_value=logical_device_return_value),\ + agent.plugin_rpc, 'get_logical_device', + return_value=logical_device_return_value), ( mock.patch.object( - driver, - '_get_rest_client', - return_value=rest_client),\ + driver, '_get_rest_client', return_value=rest_client)), ( mock.patch.object( - rest_client.pool, - 'request', return_value=self.resp) as (mock_request),\ + rest_client.pool, 'request', + return_value=self.resp)) as mock_request, ( mock.patch.object( - rest_client, - 'get_resource', - return_value=self.get_resource) as (mock_get_resource): + rest_client, 'get_resource', + return_value=self.get_resource)) as mock_get_resource: mock_request.status_code = 200 if method_name == 'DELETE_VIP': @@ -115,9 +107,10 @@ def _test_lbaasdriver(self, method_name): url=self.data.delete_vip_url) elif method_name == 'CREATE_VIP': driver.create_vip(self.fo.vip, self.fo.context) + data = jsonutils.dumps(self.data.create_vip_data) mock_request.assert_called_with( 'POST', - data=self.data.create_vip_data, + data=data, headers=self.data.header, timeout=30, url=self.data.create_vip_url) @@ -128,9 +121,10 @@ def _test_lbaasdriver(self, method_name): self.fo.old_vip, self.fo.vip, self.fo.context) + data = jsonutils.dumps(self.data.update_vip_data) mock_request.assert_called_with( 'PUT', - 
data=self.data.update_vip_data, + data=data, headers=self.data.header, timeout=self.data.timeout, url=self.data.update_vip_url) @@ -143,25 +137,28 @@ def _test_lbaasdriver(self, method_name): self.fo.old_pool, self.fo.pool, self.fo.context) + data = jsonutils.dumps(self.data.update_pool_data) mock_request.assert_called_with( 'PUT', - data=self.data.update_pool_data, + data=data, headers=self.data.header, timeout=self.data.timeout, url=self.data.update_pool_url) elif method_name == 'CREATE_MEMBER': driver.create_member(self.fo.member[0], self.fo.context) + data = jsonutils.dumps(self.data.create_member_data) mock_request.assert_called_with( 'PUT', - data=self.data.create_member_data, + data=data, headers=self.data.header, timeout=self.data.timeout, url=self.data.create_member_url) elif method_name == 'DELETE_MEMBER': driver.delete_member(self.fo.member[0], self.fo.context) + data = jsonutils.dumps(self.data.delete_member_data) mock_request.assert_called_with( 'PUT', - data=self.data.delete_member_data, + data=data, headers=self.data.header, timeout=self.data.timeout, url=self.data.delete_member_url) @@ -170,9 +167,10 @@ def _test_lbaasdriver(self, method_name): self.fo.old_member[0], self.fo.member[0], self.fo.context) + data = jsonutils.dumps(self.data.update_member_data) mock_request.assert_called_with( 'PUT', - data=self.data.update_member_data, + data=data, headers=self.data.header, timeout=self.data.timeout, url=self.data.update_member_url) @@ -180,9 +178,10 @@ def _test_lbaasdriver(self, method_name): driver.create_pool_health_monitor( self.fo.hm[0], self.fo._get_pool_object()[0]['id'], self.fo.context) + data = jsonutils.dumps(self.data.create_hm_data) mock_request.assert_called_with( 'PUT', - data=self.data.create_hm_data, + data=data, headers=self.data.header, timeout=self.data.timeout, url=self.data.create_hm_url) @@ -190,9 +189,10 @@ def _test_lbaasdriver(self, method_name): driver.delete_pool_health_monitor( self.fo.hm[0], 
self.fo._get_pool_object()[0]['id'], self.fo.context) + data = jsonutils.dumps(self.data.delete_hm_data) mock_request.assert_called_with( 'PUT', - data=self.data.delete_hm_data, + data=data, headers=self.data.header, timeout=self.data.timeout, url=self.data.delete_hm_url) @@ -201,9 +201,10 @@ def _test_lbaasdriver(self, method_name): self.fo.old_hm[0], self.fo.hm[0], self.fo._get_pool_object()[0]['id'], self.fo.context) + data = jsonutils.dumps(self.data.update_hm_data) mock_request.assert_called_with( 'PUT', - data=self.data.update_hm_data, + data=data, headers=self.data.header, timeout=self.data.timeout, url=self.data.update_hm_url) @@ -318,7 +319,3 @@ def test_pool_health_monitor_update_lbaasdriver(self): """ self._test_lbaasdriver('UPDATE_POOL_HEALTH_MONITOR') - - -if __name__ == '__main__': - unittest.main() diff --git a/gbpservice/nfp/configurator/drivers/vpn/vyos/__init__.py b/gbpservice/contrib/tests/unit/nfp/configurator/drivers/nfp_service/__init__.py similarity index 100% rename from gbpservice/nfp/configurator/drivers/vpn/vyos/__init__.py rename to gbpservice/contrib/tests/unit/nfp/configurator/drivers/nfp_service/__init__.py diff --git a/gbpservice/contrib/tests/unit/nfp/configurator/drivers/nfp_service/test_heat_driver.py b/gbpservice/contrib/tests/unit/nfp/configurator/drivers/nfp_service/test_heat_driver.py new file mode 100644 index 0000000000..6b1530a140 --- /dev/null +++ b/gbpservice/contrib/tests/unit/nfp/configurator/drivers/nfp_service/test_heat_driver.py @@ -0,0 +1,44 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron.tests import base + +from gbpservice.contrib.nfp.configurator.drivers.nfp_service.heat.heat_driver \ + import HeatDriver +from gbpservice.contrib.nfp.configurator.lib import ( + nfp_service_constants as const) +from gbpservice.contrib.tests.unit.nfp.configurator.test_data import ( + nfp_service_test_data as fo) + + +class NfpServiceHeatDriverTestCase(base.BaseTestCase): + """ Implements test cases for driver methods + of nfp service. + + """ + + def __init__(self, *args, **kwargs): + super(NfpServiceHeatDriverTestCase, self).__init__(*args, **kwargs) + self.fo = fo.FakeObjects() + + def test_configure_interfaces(self): + """ Implements test case for nfp service heat driver. + + Returns: none + + """ + + driver = HeatDriver(self.fo.conf) + actual_val = driver.run_heat(self.fo.context, self.fo.kwargs) + + expected_val = const.UNHANDLED_RESULT + self.assertEqual(actual_val, expected_val) diff --git a/gbpservice/nfp/configurator/lib/__init__.py b/gbpservice/contrib/tests/unit/nfp/configurator/drivers/vpn/__init__.py similarity index 100% rename from gbpservice/nfp/configurator/lib/__init__.py rename to gbpservice/contrib/tests/unit/nfp/configurator/drivers/vpn/__init__.py diff --git a/gbpservice/neutron/tests/unit/nfp/configurator/drivers/vpn/vyos/test_vpn_driver.py b/gbpservice/contrib/tests/unit/nfp/configurator/drivers/vpn/test_vpn_driver.py similarity index 56% rename from gbpservice/neutron/tests/unit/nfp/configurator/drivers/vpn/vyos/test_vpn_driver.py rename to gbpservice/contrib/tests/unit/nfp/configurator/drivers/vpn/test_vpn_driver.py index 9c5ff098eb..cee0c45724 100644 --- a/gbpservice/neutron/tests/unit/nfp/configurator/drivers/vpn/vyos/test_vpn_driver.py +++ b/gbpservice/contrib/tests/unit/nfp/configurator/drivers/vpn/test_vpn_driver.py @@ -11,39 +11,40 @@ # under the License. 
import requests -import unittest - -from gbpservice.neutron.tests.unit.nfp.configurator.test_data import \ - vpn_test_data -from gbpservice.nfp.configurator.agents import vpn -from gbpservice.nfp.configurator.drivers.base import base_driver -from gbpservice.nfp.configurator.drivers.vpn.vyos import vyos_vpn_driver -from oslo_serialization import jsonutils import json import mock +from gbpservice.contrib.nfp.configurator.agents import vpn +from gbpservice.contrib.nfp.configurator.drivers.base import base_driver +from gbpservice.contrib.nfp.configurator.drivers.vpn.vyos import ( + vyos_vpn_driver) +from gbpservice.contrib.tests.unit.nfp.configurator.test_data import ( + vpn_test_data) + +from neutron.tests import base + +from oslo_serialization import jsonutils + bdobj = base_driver.BaseDriver('conf') bdobj.register_agent_object_with_driver( 'agent', vpn.VpnaasRpcSender(vpn_test_data.VPNTestData().sc)) -""" Implements test cases for driver methods -of vpn. - - -""" - -class VpnaasIpsecDriverTestCase(unittest.TestCase): +class VpnaasIpsecDriverTestCase(base.BaseTestCase): + """ + Implements test cases for driver methods + of vpn. + """ def __init__(self, *args, **kwargs): super(VpnaasIpsecDriverTestCase, self).__init__(*args, **kwargs) self.conf = 'conf' - self.dict_objects = vpn_test_data.VPNTestData() - self.context = self.dict_objects.make_service_context() - self.plugin_rpc = vpn.VpnaasRpcSender(self.dict_objects.sc) + self.test_dict = vpn_test_data.VPNTestData() + self.context = self.test_dict.make_service_context() + self.plugin_rpc = vpn.VpnaasRpcSender(self.test_dict.sc) self.driver = vyos_vpn_driver.VpnaasIpsecDriver(self.conf) self.svc_validate = ( vyos_vpn_driver.VPNServiceValidator(self.plugin_rpc)) @@ -51,60 +52,62 @@ def __init__(self, *args, **kwargs): self.fake_resp_dict = {'status': True} def test_create_vpn_service(self): - ''' + """ Implements method to test the vpn driver's create vpn service. 
- ''' + """ - context = self.dict_objects.make_service_context(operation_type='vpn') + context = self.test_dict.make_service_context(operation_type='vpn') - kwargs = self.dict_objects.make_resource_data(operation='create', - service_type='vpn') + kwargs = self.test_dict.make_resource_data(operation='create', + service_type='vpn') with mock.patch.object(bdobj.agent, 'update_status') as ( mock_update_status): self.driver.vpnservice_updated(context, kwargs) mock_update_status.assert_called_with( self.context, - self.dict_objects.vpn_vpnsvc_active) + self.test_dict.vpn_vpnsvc_active) def test_create_ipsec_site_conn(self): - ''' + """ Implements method to test the vpn driver's create ipsec site conn - ''' + """ self.resp = mock.Mock(status_code=200) - context = self.dict_objects.make_service_context() - kwargs = self.dict_objects.make_resource_data(operation='create', - service_type='ipsec') - with mock.patch.object(bdobj.agent, 'update_status') as ( - mock_update_status),\ - mock.patch.object(jsonutils, 'loads') as mock_resp,\ - mock.patch.object(self.driver.agent, 'get_vpn_servicecontext', - return_value=[ - self.dict_objects.svc_context]),\ - mock.patch.object(requests, 'post') as ( - mock_post): + context = self.test_dict.make_service_context() + kwargs = self.test_dict.make_resource_data(operation='create', + service_type='ipsec') + with mock.patch.object( + bdobj.agent, 'update_status') as mock_update_status, ( + mock.patch.object(jsonutils, 'loads')) as mock_resp, ( + mock.patch.object(requests, 'post')) as mock_post, ( + mock.patch.object( + self.driver.agent, 'get_vpn_servicecontext', + return_value=[self.test_dict.svc_context])): mock_resp.return_value = self.fake_resp_dict mock_post.return_value = self.resp self.driver.vpnservice_updated(context, kwargs) mock_post.assert_called_with( - self.dict_objects.url_create_ipsec_tunnel, - data=jsonutils.dumps(self.dict_objects.data_), - timeout=self.dict_objects.timeout) + self.test_dict.url_create_ipsec_tunnel, + 
data=jsonutils.dumps(self.test_dict.data_), + timeout=self.test_dict.timeout) mock_update_status.assert_called_with( context, - self.dict_objects.ipsec_vpnsvc_status) + self.test_dict.ipsec_vpnsvc_status) + + def _dict_to_query_str(self, args): + return '&'.join([str(k) + '=' + str(v) for k, v in args.iteritems()]) def test_delete_ipsec_site_conn(self): - ''' + """ Implements method to test the vpn driver's create ipsec site conn - ''' + """ self.resp = mock.Mock(status_code=200) - kwargs = self.dict_objects.make_resource_data(operation='delete', - service_type='ipsec') - with mock.patch.object(self.plugin_rpc, 'ipsec_site_conn_deleted'),\ - mock.patch.object(json, 'loads') as mock_resp,\ - mock.patch.object(requests, 'delete') as ( + kwargs = self.test_dict.make_resource_data(operation='delete', + service_type='ipsec') + with mock.patch.object(self.plugin_rpc, 'ipsec_site_conn_deleted'), ( + mock.patch.object(json, 'loads')) as mock_resp, ( + mock.patch.object(requests, 'delete')) as ( mock_delete): mock_resp.return_value = self.fake_resp_dict mock_delete.return_value = self.resp @@ -114,58 +117,64 @@ def test_delete_ipsec_site_conn(self): tokens = svc_desc.split(';') cidr = tokens[1].split('=')[1] - url = "?local_cidr=" + cidr + "&peer_address=" + ( - resource['peer_address'] + ( - "&peer_cidrs=[u\'" + resource['peer_cidrs'][0] + "\']")) - url = self.dict_objects.url_delete_ipsec_tunnel + url + tunnel = {} + tunnel['peer_address'] = resource['peer_address'] + tunnel['local_cidr'] = cidr + tunnel['peer_cidrs'] = resource['peer_cidrs'] + + url = (self.test_dict.url_delete_ipsec_tunnel + '?' 
+ + self._dict_to_query_str(tunnel)) + mock_delete.assert_called_with( url.encode('ascii', 'ignore'), - timeout=self.dict_objects.timeout, + timeout=self.test_dict.timeout, data=None) def test_check_status(self): - ''' + """ Implements method to test the vpn driver's check status - ''' + """ self.resp = mock.Mock(status_code=200) - svc_context = self.dict_objects.svc_context - with mock.patch.object(self.plugin_rpc, 'update_status'),\ - mock.patch.object(self.resp, 'json') as mock_json,\ - mock.patch.object(requests, 'get') as mock_get: + svc_context = self.test_dict.svc_context + with mock.patch.object(self.plugin_rpc, 'update_status'), ( + mock.patch.object(self.resp, 'json')) as mock_json, ( + mock.patch.object(requests, 'get')) as mock_get: mock_get.return_value = self.resp mock_json.return_value = {'state': 'DOWN'} state = self.driver.check_status(self.context, svc_context) self.assertEqual(state, None) -""" Implements test cases for driver methods -of generic config. - -""" +class VpnGenericConfigDriverTestCase(base.BaseTestCase): + """ + Implements test cases for driver methods + of generic config. 
-class VpnGenericConfigDriverTestCase(unittest.TestCase): - + """ def __init__(self, *args, **kwargs): super(VpnGenericConfigDriverTestCase, self).__init__(*args, **kwargs) self.conf = 'conf' - self.dict_objects = vpn_test_data.VPNTestData() - self.context = self.dict_objects.make_service_context() - self.plugin_rpc = vpn.VpnaasRpcSender(self.dict_objects.sc) - self.rest_apt = vyos_vpn_driver.RestApi(self.dict_objects.vm_mgmt_ip) - self.driver = vyos_vpn_driver.VpnGenericConfigDriver(self.conf) + self.test_dict = vpn_test_data.VPNTestData() + self.context = self.test_dict.make_service_context() + self.plugin_rpc = vpn.VpnaasRpcSender(self.test_dict.sc) + self.rest_apt = vyos_vpn_driver.RestApi(self.test_dict.vm_mgmt_ip) + self.driver = vyos_vpn_driver.VpnGenericConfigDriver() self.resp = mock.Mock() self.fake_resp_dict = {'status': True} - self.kwargs = self.dict_objects.fake_resource_data() + self.kwargs = self.test_dict.fake_resource_data() def setUp(self): + super(VpnGenericConfigDriverTestCase, self).setUp() self.resp = mock.Mock(status_code=200) def tearDown(self): + super(VpnGenericConfigDriverTestCase, self).tearDown() self.resp = mock.Mock(status_code=200) def test_configure_interfaces(self): - """ Implements test case for configure interfaces method + """ + Implements test case for configure interfaces method of generic config driver. 
Returns: none @@ -173,21 +182,22 @@ def test_configure_interfaces(self): """ with mock.patch.object( - requests, 'post', return_value=self.resp) as mock_post, \ + requests, 'post', return_value=self.resp) as mock_post, ( mock.patch.object(self.resp, 'json', - return_value=self.fake_resp_dict): - self.driver.configure_interfaces(self.dict_objects.context_device, + return_value=self.fake_resp_dict)): + self.driver.configure_interfaces(self.test_dict.context_device, self.kwargs) mock_post.assert_called_with( - self.dict_objects.url_for_add_inte, + self.test_dict.url_for_add_inte, jsonutils.dumps( - self.dict_objects.data_for_interface), - timeout=self.dict_objects.timeout) + self.test_dict.data_for_interface), + timeout=self.test_dict.timeout) def test_clear_interfaces(self): - """ Implements test case for clear interfaces method + """ + Implements test case for clear interfaces method of generic config driver. Returns: none @@ -196,20 +206,21 @@ def test_clear_interfaces(self): self.resp = mock.Mock(status_code=200) with mock.patch.object( - requests, 'delete', return_value=self.resp) as mock_delete, \ + requests, 'delete', return_value=self.resp) as mock_delete, ( mock.patch.object( - self.resp, 'json', return_value=self.fake_resp_dict): - self.driver.clear_interfaces(self.dict_objects.context_device, + self.resp, 'json', return_value=self.fake_resp_dict)): + self.driver.clear_interfaces(self.test_dict.context_device, self.kwargs) mock_delete.assert_called_with( - self.dict_objects.url_for_del_inte, + self.test_dict.url_for_del_inte, data=jsonutils.dumps( - self.dict_objects.data_for_interface), - timeout=self.dict_objects.timeout) + self.test_dict.data_for_interface), + timeout=self.test_dict.timeout) def test_configure_source_routes(self): - """ Implements test case for configure routes method + """ + Implements test case for configure routes method of generic config driver. 
Returns: none @@ -217,55 +228,56 @@ def test_configure_source_routes(self): """ with mock.patch.object( - requests, 'post', return_value=self.resp) as mock_post, \ + requests, 'post', return_value=self.resp) as mock_post, ( mock.patch.object(jsonutils, 'loads', - return_value=self.fake_resp_dict): - self.driver.configure_routes(self.dict_objects.context_device, + return_value=self.fake_resp_dict)): + self.driver.configure_routes(self.test_dict.context_device, self.kwargs) mock_post.assert_called_with( - self.dict_objects.url_for_add_src_route, + self.test_dict.url_for_add_src_route, data=jsonutils.dumps( - self.dict_objects.data_for_add_src_route), - timeout=self.dict_objects.timeout) + self.test_dict.data_for_add_src_route), + timeout=self.test_dict.timeout) def test_delete_source_routes(self): - """ Implements test case for clear routes method + """ + Implements test case for clear routes method of generic config driver. Returns: none """ - with mock.patch.object(requests, 'post', return_value=self.resp), \ + with mock.patch.object(requests, 'post', return_value=self.resp), ( mock.patch.object( - requests, 'delete', return_value=self.resp) as mock_delete: + requests, 'delete', return_value=self.resp)) as mock_delete: self.driver.clear_routes( - self.dict_objects.context_device, self.kwargs) + self.test_dict.context_device, self.kwargs) mock_delete.assert_called_with( - self.dict_objects.url_for_del_src_route, + self.test_dict.url_for_del_src_route, data=jsonutils.dumps( - self.dict_objects.data_for_del_src_route), - timeout=self.dict_objects.timeout) + self.test_dict.data_for_del_src_route), + timeout=self.test_dict.timeout) -class VPNSvcValidatorTestCase(unittest.TestCase): +class VPNSvcValidatorTestCase(base.BaseTestCase): def __init__(self, *args, **kwargs): super(VPNSvcValidatorTestCase, self).__init__(*args, **kwargs) - self.dict_objects = vpn_test_data.VPNTestData() - self.plugin_rpc = vpn.VpnaasRpcSender(self.dict_objects.sc) + self.test_dict = 
vpn_test_data.VPNTestData() + self.plugin_rpc = vpn.VpnaasRpcSender(self.test_dict.sc) self.valid_obj = vyos_vpn_driver.VPNServiceValidator(self.plugin_rpc) def test_validate_active(self): - ''' + """ Implements testcase for vpn driver's validate method to test in success condition while making call to the service VM - ''' + """ - context = self.dict_objects.make_service_context() - svc = self.dict_objects._create_vpnservice_obj()['resource'] + context = self.test_dict.make_service_context() + svc = self.test_dict._create_vpnservice_obj()['resource'] description = str(svc['description']) description = description.split(';') description[1] = 'tunnel_local_cidr=12.0.6.0/24' @@ -275,29 +287,28 @@ def test_validate_active(self): with mock.patch.object(self.plugin_rpc, "update_status") as mock_valid: self.valid_obj.validate(context, svc) mock_valid.assert_called_with(context, - self.dict_objects.vpn_vpnsvc_active) + self.test_dict.vpn_vpnsvc_active) def test_validate_error(self): - ''' + """ Implements testcase for vpn driver's validate method to test in fail condition while making call to the service VM - ''' + """ - context = self.dict_objects.make_service_context() + context = self.test_dict.make_service_context() with mock.patch.object(self.plugin_rpc, "update_status") as mock_valid: self.valid_obj.validate( context, - self.dict_objects._create_vpnservice_obj()['resource']) + self.test_dict._create_vpnservice_obj()['resource']) mock_valid.assert_called_with( context, - self.dict_objects.vpn_vpnsvc_active) - + self.test_dict.vpn_vpnsvc_active) -class RestApiTestCase(unittest.TestCase): - ''' +class RestApiTestCase(base.BaseTestCase): + """ Class which implements the testcases to test the vpn RestApi calls. 
- ''' + """ def __init__(self, *args, **kwargs): super(RestApiTestCase, self).__init__(*args, **kwargs) @@ -305,7 +316,7 @@ def __init__(self, *args, **kwargs): vpn_test_data.VPNTestData().vm_mgmt_ip)) self.resp = mock.Mock() self.resp = mock.Mock(status_code=200) - self.dict_objects = vpn_test_data.VPNTestData() + self.test_dict = vpn_test_data.VPNTestData() self.args = {'peer_address': '1.103.2.2'} self.fake_resp_dict = {'status': None} self.timeout = 90 @@ -313,62 +324,43 @@ def __init__(self, *args, **kwargs): self.j_data = jsonutils.dumps(self.data) def test_post_success(self): - ''' + """ Implements testcase for vpn drivers post method to test in success condition while making call to the service VM - ''' + """ self.resp = mock.Mock(status_code=200) self.fake_resp_dict.update({'status': True}) with mock.patch.object(requests, 'post', return_value=self.resp) as ( - mock_post),\ + mock_post), ( mock.patch.object(jsonutils, 'loads', - return_value=self.fake_resp_dict): + return_value=self.fake_resp_dict)): self.rest_obj.post('create-ipsec-site-conn', self.data) mock_post.assert_called_with( - self.dict_objects.url_create_ipsec_conn, - data=self.j_data, - timeout=self.timeout) - - def test_post_fail(self): - ''' - Implements testcase for vpn drivers post method to test in - fail condition while making call to the service VM - ''' - - self.resp = mock.Mock(status_code=404) - self.fake_resp_dict.update({'status': False}) - with mock.patch.object(requests, 'post', return_value=self.resp) as ( - mock_post),\ - mock.patch.object(jsonutils, 'loads', - return_value=self.fake_resp_dict): - with self.assertRaises(requests.exceptions.HTTPError): - self.rest_obj.post('create-ipsec-site-conn', self.data) - mock_post.assert_called_with( - self.dict_objects.url_create_ipsec_conn, + self.test_dict.url_create_ipsec_conn, data=self.j_data, timeout=self.timeout) def test_put_success(self): - ''' + """ Implements testcase for vpn drivers put method to test in success condition 
while making call to the service VM - ''' + """ self.resp = mock.Mock(status_code=200) with mock.patch.object(requests, 'put', return_value=self.resp) as ( mock_put): self.rest_obj.put('create-ipsec-site-conn', self.data) mock_put.assert_called_with( - self.dict_objects.url_create_ipsec_conn, + self.test_dict.url_create_ipsec_conn, data=self.j_data, timeout=self.timeout) def test_put_fail(self): - ''' + """ Implements testcase for vpn drivers put method to test in fail condition while making call to the service VM - ''' + """ self.resp = mock.Mock(status_code=404) with mock.patch.object(requests, 'put', return_value=self.resp) as ( @@ -376,79 +368,55 @@ def test_put_fail(self): self.rest_obj.put('create-ipsec-site-conn', self.data) mock_put.assert_called_with( - self.dict_objects.url_create_ipsec_conn, + self.test_dict.url_create_ipsec_conn, data=jsonutils.dumps(self.data), timeout=self.timeout) def test_delete_success(self): - ''' + """ Implements testcase for vpn drivers delete method to test in success condition while making call to the service VM - ''' + """ self.resp = mock.Mock(status_code=200) self.fake_resp_dict.update({'status': True}) with mock.patch.object(requests, 'delete', return_value=self.resp) as ( - mock_delete),\ + mock_delete), ( mock.patch.object(jsonutils, 'loads', - return_value=self.fake_resp_dict): + return_value=self.fake_resp_dict)): self.rest_obj.delete('delete-ipsec-site-conn', self.args, self.data) mock_delete.assert_called_with( - self.dict_objects.url_delete_ipsec_conn, - timeout=self.timeout, - data=self.j_data) - - def test_delete_fail(self): - ''' - Implements testcase for vpn drivers delete method to test in - fail condition while making call to the service VM - ''' - - self.resp = mock.Mock(status_code=404) - self.fake_resp_dict.update({'status': False}) - with mock.patch.object(requests, 'delete', return_value=self.resp) as ( - mock_delete),\ - mock.patch.object(jsonutils, 'loads', - return_value=self.fake_resp_dict): - with 
self.assertRaises(requests.exceptions.HTTPError): - self.rest_obj.delete('delete-ipsec-site-conn', - self.args, - self.data) - mock_delete.assert_called_with( - self.dict_objects.url_delete_ipsec_conn, + self.test_dict.url_delete_ipsec_conn, timeout=self.timeout, data=self.j_data) def test_get_success(self): - ''' + """ Implements testcase for vpn drivers get methode to test in fail condition while making call to the service VM - ''' + """ self.resp = mock.Mock(status_code=200) with mock.patch.object(requests, 'get', return_value=self.resp) as ( mock_get): self.rest_obj.get('create-ipsec-site-tunnel', self.data) mock_get.assert_called_with( - self.dict_objects.url_create_ipsec_tunnel, + self.test_dict.url_create_ipsec_tunnel, params=self.data, timeout=self.timeout) def test_get_fail(self): - ''' + """ Implements testcase for vpn drivers get methode to test in fail condition - ''' + """ self.resp = mock.Mock(status_code=404) with mock.patch.object(requests, 'get', return_value=self.resp) as ( mock_get): self.rest_obj.get('create-ipsec-site-tunnel', self.data) mock_get.assert_called_with( - self.dict_objects.url_create_ipsec_tunnel, + self.test_dict.url_create_ipsec_tunnel, params=self.data, timeout=self.timeout) - -if __name__ == '__main__': - unittest.main() diff --git a/gbpservice/nfp/configurator/modules/__init__.py b/gbpservice/contrib/tests/unit/nfp/configurator/lib/__init__.py similarity index 100% rename from gbpservice/nfp/configurator/modules/__init__.py rename to gbpservice/contrib/tests/unit/nfp/configurator/lib/__init__.py diff --git a/gbpservice/neutron/tests/unit/nfp/configurator/lib/filter_base.py b/gbpservice/contrib/tests/unit/nfp/configurator/lib/filter_base.py similarity index 99% rename from gbpservice/neutron/tests/unit/nfp/configurator/lib/filter_base.py rename to gbpservice/contrib/tests/unit/nfp/configurator/lib/filter_base.py index 81d6a09d71..87b351506a 100644 --- a/gbpservice/neutron/tests/unit/nfp/configurator/lib/filter_base.py +++ 
b/gbpservice/contrib/tests/unit/nfp/configurator/lib/filter_base.py @@ -11,13 +11,12 @@ # under the License. -import unittest +from neutron.tests import base -""" Defines all the dummy resources needed for test_filter.py -""" - -class BaseTestCase(unittest.TestCase): +class BaseTestCase(base.BaseTestCase): + """ Defines all the dummy resources needed for test_filter.py + """ def __init__(self, *args, **kwargs): super(BaseTestCase, self).__init__(*args, **kwargs) self.service_info = {} diff --git a/gbpservice/neutron/tests/unit/nfp/configurator/lib/test_demuxer.py b/gbpservice/contrib/tests/unit/nfp/configurator/lib/test_demuxer.py similarity index 89% rename from gbpservice/neutron/tests/unit/nfp/configurator/lib/test_demuxer.py rename to gbpservice/contrib/tests/unit/nfp/configurator/lib/test_demuxer.py index dd076622cb..85243e46ed 100644 --- a/gbpservice/neutron/tests/unit/nfp/configurator/lib/test_demuxer.py +++ b/gbpservice/contrib/tests/unit/nfp/configurator/lib/test_demuxer.py @@ -10,18 +10,14 @@ # License for the specific language governing permissions and limitations # under the License. -import unittest - -from gbpservice.neutron.tests.unit.nfp.configurator.test_data import ( +from gbpservice.contrib.nfp.configurator.lib import demuxer +from gbpservice.contrib.tests.unit.nfp.configurator.test_data import ( fw_test_data as fo) -from gbpservice.nfp.configurator.lib import demuxer - -""" Implements test cases for demuxer of configurator. - -""" +from neutron.tests import base -class ServiceAgentDemuxerTestCase(unittest.TestCase): +class ServiceAgentDemuxerTestCase(base.BaseTestCase): + """ Implements test cases for demuxer of configurator. 
""" def __init__(self, *args, **kwargs): super(ServiceAgentDemuxerTestCase, self).__init__(*args, **kwargs) self.fo = fo.FakeObjects() diff --git a/gbpservice/neutron/tests/unit/nfp/configurator/lib/test_filter.py b/gbpservice/contrib/tests/unit/nfp/configurator/lib/test_filter.py similarity index 98% rename from gbpservice/neutron/tests/unit/nfp/configurator/lib/test_filter.py rename to gbpservice/contrib/tests/unit/nfp/configurator/lib/test_filter.py index d70572345d..f14b93b049 100644 --- a/gbpservice/neutron/tests/unit/nfp/configurator/lib/test_filter.py +++ b/gbpservice/contrib/tests/unit/nfp/configurator/lib/test_filter.py @@ -12,14 +12,12 @@ import filter_base -from gbpservice.nfp.configurator.lib import data_filter +from gbpservice.contrib.nfp.configurator.lib import data_filter import mock -"""Test class to test data_filter.py using unittest framework -""" - class FilterTest(filter_base.BaseTestCase): + """Test class to test data_filter.py using unittest framework """ def __init__(self, *args, **kwargs): super(FilterTest, self).__init__(*args, **kwargs) diff --git a/gbpservice/neutron/tests/unit/nfp/configurator/lib/test_schema_validator.py b/gbpservice/contrib/tests/unit/nfp/configurator/lib/test_schema_validator.py similarity index 93% rename from gbpservice/neutron/tests/unit/nfp/configurator/lib/test_schema_validator.py rename to gbpservice/contrib/tests/unit/nfp/configurator/lib/test_schema_validator.py index 080ac7937d..0260cf8b0f 100644 --- a/gbpservice/neutron/tests/unit/nfp/configurator/lib/test_schema_validator.py +++ b/gbpservice/contrib/tests/unit/nfp/configurator/lib/test_schema_validator.py @@ -10,17 +10,15 @@ # License for the specific language governing permissions and limitations # under the License. 
-import gbpservice.nfp.configurator.lib.schema as schema -import gbpservice.nfp.configurator.lib.schema_validator as sv -import unittest - - -"""SchemaResources is a helper class which contains all the dummy resources - needed for TestSchemaValidator class -""" +import gbpservice.contrib.nfp.configurator.lib.schema as schema +import gbpservice.contrib.nfp.configurator.lib.schema_validator as sv +from neutron.tests import base class SchemaResources(object): + """SchemaResources is a helper class which contains all the dummy resources + needed for TestSchemaValidator class + """ resource_healthmonitor = 'healthmonitor' resource_interfaces = 'interfaces' resource_routes = 'routes' @@ -65,12 +63,11 @@ class SchemaResources(object): 'periodicity': 'initial' } -"""TestSchemaValidator is a test class to test schema_validator.py using - unittest framework -""" - -class TestSchemaValidator(unittest.TestCase): +class TestSchemaValidator(base.BaseTestCase): + """TestSchemaValidator is a test class to test schema_validator.py using + unittest framework + """ def __init__(self, *args, **kwargs): super(TestSchemaValidator, self).__init__(*args, **kwargs) @@ -179,6 +176,3 @@ def test_decode_for_neutron_apis(self): request_data['info']['service_type'] = 'firewall' result = self.sv.decode(request_data, False) self.assertTrue(result) - -if __name__ == '__main__': - unittest.main() diff --git a/gbpservice/nfp/service_plugins/__init__.py b/gbpservice/contrib/tests/unit/nfp/configurator/modules/__init__.py similarity index 100% rename from gbpservice/nfp/service_plugins/__init__.py rename to gbpservice/contrib/tests/unit/nfp/configurator/modules/__init__.py diff --git a/gbpservice/contrib/tests/unit/nfp/configurator/modules/test_configurator.py b/gbpservice/contrib/tests/unit/nfp/configurator/modules/test_configurator.py new file mode 100644 index 0000000000..1e4407669b --- /dev/null +++ b/gbpservice/contrib/tests/unit/nfp/configurator/modules/test_configurator.py @@ -0,0 +1,290 @@ +# 
Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock + +from neutron.tests import base + +from gbpservice.contrib.nfp.configurator.lib import demuxer as demuxer_lib +from gbpservice.contrib.nfp.configurator.modules import configurator as cfgr +from gbpservice.contrib.tests.unit.nfp.configurator.test_data import ( + fw_test_data as fo) + + +class ConfiguratorRpcManagerTestCase(base.BaseTestCase): + """ Tests RPC manager class of configurator + + """ + + def __init__(self, *args, **kwargs): + super(ConfiguratorRpcManagerTestCase, self).__init__(*args, **kwargs) + self.fo = fo.FakeObjects() + + @mock.patch(__name__ + '.fo.FakeObjects.conf') + @mock.patch(__name__ + '.fo.FakeObjects.sc') + def _get_ConfiguratorRpcManager_object(self, sc, conf): + """ Retrieves RPC manager object of configurator. + + :param sc: mocked service controller object of process model framework + :param conf: mocked OSLO configuration file + + Returns: object of configurator's RPC manager. + + """ + + cm = cfgr.ConfiguratorModule(sc) + demuxer = demuxer_lib.ServiceAgentDemuxer() + rpc_mgr = cfgr.ConfiguratorRpcManager(sc, cm, conf, demuxer) + return sc, rpc_mgr + + def _test_network_function_device_config(self, operation, + method, batch=False): + """ Tests generic config APIs + + :param operation: create/delete + :param method: CONFIGURE_ROUTES/CLEAR_ROUTES/ + CONFIGURE_INTERFACES/CLEAR_INTERFACES + :param batch: True or False. 
Indicates if the + request is a batch request + + Returns: none + + """ + + sc, rpc_mgr = self._get_ConfiguratorRpcManager_object() + agent = mock.Mock() + + request_data = {'batch': { + 'request_data_actual': ( + self.fo.fake_request_data_generic_bulk()), + 'request_data_expected': ( + self.fo.fake_request_data_generic_bulk())}, + 'single': { + 'request_data_actual': ( + (self.fo.fake_request_data_generic_single( + routes=True) + if 'ROUTES' in method + else self.fo.fake_request_data_generic_single())), + 'request_data_expected': ( + (self.fo.fake_request_data_generic_single( + routes=True) + if 'ROUTES' in method + else self.fo.fake_request_data_generic_single()))} + } + if batch: + request_data_actual, request_data_expected = ( + request_data['batch'].values()) + else: + request_data_actual, request_data_expected = ( + request_data['single'].values()) + + with mock.patch.object(rpc_mgr, + '_get_service_agent_instance', + return_value=agent), ( + mock.patch.object(agent, 'process_request')) as mock_request: + + if operation == 'create': + rpc_mgr.create_network_function_device_config( + self.fo.context, request_data_actual) + elif operation == 'delete': + rpc_mgr.delete_network_function_device_config( + self.fo.context, request_data_actual) + + context = request_data_expected['info']['context'] + + agent_info = {} + agent_info.update( + {'resource': request_data_expected['config'][0][ + 'resource'], + 'resource_type': request_data_expected['info'][ + 'service_type'], + 'service_vendor': request_data_expected['info'][ + 'service_vendor'], + 'context': context, + 'notification_data': {} + }) + notification_data = dict() + sa_req_list = self.fo.fake_sa_req_list() + + response_data = {'single': {'routes': [sa_req_list[1]], + 'interfaces': [sa_req_list[0]]}, + 'batch': sa_req_list} + + if batch: + data = response_data['batch'] + if operation == 'delete': + data[0]['method'] = 'clear_interfaces' + data[1]['method'] = 'clear_routes' + else: + data = 
response_data['single'][method.split('_')[1].lower()] + if operation == 'delete': + data[0]['method'] = data[0]['method'].replace( + 'configure', 'clear', 1) + mock_request.assert_called_with(data, + notification_data) + + def _test_network_function_config(self, operation): + """ Tests firewall APIs + + :param operation: CREATE_FIREWALL/UPDATE_FIREWALL/DELETE_FIREWALL + + Returns: none + + """ + + sc, rpc_mgr = self._get_ConfiguratorRpcManager_object() + agent = mock.Mock() + method = {'CREATE': 'create_network_function_config', + 'UPDATE': 'update_network_function_config', + 'DELETE': 'delete_network_function_config'} + request_data = self.fo.fake_request_data_fw() + with mock.patch.object(rpc_mgr, + '_get_service_agent_instance', + return_value=agent), ( + mock.patch.object(agent, 'process_request')) as mock_request: + + getattr(rpc_mgr, method[operation.split('_')[0]])( + self.fo.fw_context, + request_data) + + notification_data = dict() + data = self.fo.fake_sa_req_list_fw() + if 'UPDATE' in operation: + data[0]['method'] = data[0]['method'].replace( + 'create', 'update', 1) + elif 'DELETE' in operation: + data[0]['method'] = data[0]['method'].replace( + 'create', 'delete', 1) + + mock_request.assert_called_with(data, + notification_data) + + def _test_notifications(self): + """ Tests response path notification APIs + + Returns: none + + """ + + sc, rpc_mgr = self._get_ConfiguratorRpcManager_object() + + events = fo.FakeEventGetNotifications() + with mock.patch.object(sc, 'get_stashed_events', + return_value=[events]): + + return_value = rpc_mgr.get_notifications('context') + + expected_value = [events.data] + self.assertEqual(return_value, expected_value) + + def test_configure_routes_generic_api(self): + """ Implements test case for configure routes API + + Returns: none + + """ + + method = "CONFIGURE_ROUTES" + operation = 'create' + self._test_network_function_device_config(operation, method) + + def test_clear_routes_generic_api(self): + """ Implements 
test case for clear routes API + + Returns: none + + """ + + method = "CLEAR_ROUTES" + operation = 'delete' + self._test_network_function_device_config(operation, method) + + def test_configure_interfaces_generic_api(self): + """ Implements test case for configure interfaces API + + Returns: none + + """ + + method = "CONFIGURE_INTERFACES" + operation = 'create' + self._test_network_function_device_config(operation, method) + + def test_clear_interfaces_generic_api(self): + """ Implements test case for clear interfaces API + + Returns: none + + """ + + method = "CLEAR_INTERFACES" + operation = 'delete' + self._test_network_function_device_config(operation, method) + + def test_configure_bulk_generic_api(self): + """ Implements test case for bulk configure request API + + Returns: none + + """ + + method = "PROCESS_BATCH" + operation = 'create' + self._test_network_function_device_config(operation, method, True) + + def test_clear_bulk_generic_api(self): + """ Implements test case for bulk clear request API + + Returns: none + + """ + + method = "PROCESS_BATCH" + operation = 'delete' + self._test_network_function_device_config(operation, method, True) + + def test_network_function_create_api(self): + """ Implements test case for create firewall API + + Returns: none + + """ + + self._test_network_function_config('CREATE_FIREWALL') + + def test_network_function_update_api(self): + """ Implements test case for update firewall API + + Returns: none + + """ + + self._test_network_function_config('UPDATE_FIREWALL') + + def test_network_function_delete_api(self): + """ Implements test case for delete firewall API + + Returns: none + + """ + + self._test_network_function_config('DELETE_FIREWALL') + + def test_get_notifications_generic_configurator_api(self): + """ Implements test case for get notifications API + of configurator + + Returns: none + + """ + + self._test_notifications() diff --git a/gbpservice/nfp/service_plugins/firewall/__init__.py 
b/gbpservice/contrib/tests/unit/nfp/configurator/test_data/__init__.py similarity index 100% rename from gbpservice/nfp/service_plugins/firewall/__init__.py rename to gbpservice/contrib/tests/unit/nfp/configurator/test_data/__init__.py diff --git a/gbpservice/neutron/tests/unit/nfp/configurator/test_data/asav_fw_test_data.py b/gbpservice/contrib/tests/unit/nfp/configurator/test_data/asav_fw_test_data.py similarity index 100% rename from gbpservice/neutron/tests/unit/nfp/configurator/test_data/asav_fw_test_data.py rename to gbpservice/contrib/tests/unit/nfp/configurator/test_data/asav_fw_test_data.py diff --git a/gbpservice/neutron/tests/unit/nfp/configurator/test_data/fw_test_data.py b/gbpservice/contrib/tests/unit/nfp/configurator/test_data/fw_test_data.py similarity index 81% rename from gbpservice/neutron/tests/unit/nfp/configurator/test_data/fw_test_data.py rename to gbpservice/contrib/tests/unit/nfp/configurator/test_data/fw_test_data.py index 8af7789147..e0b91292b7 100644 --- a/gbpservice/neutron/tests/unit/nfp/configurator/test_data/fw_test_data.py +++ b/gbpservice/contrib/tests/unit/nfp/configurator/test_data/fw_test_data.py @@ -10,12 +10,12 @@ # License for the specific language governing permissions and limitations # under the License. -""" Implements fake objects for assertion. -""" +class FakeObjects(object): + """ Implements fake objects for assertion. 
+ """ -class FakeObjects(object): sc = 'sc' empty_dict = {} context = 'APIcontext' @@ -24,7 +24,8 @@ class FakeObjects(object): 'agent_info': { 'resource': 'firewall', 'service_vendor': 'vyos', - 'context': {'requester': 'device_orch'}, + 'context': {'requester': 'device_orch', + 'logging_context': {}}, 'resource_type': 'firewall'}, 'notification_data': {}, 'service_info': {}, 'resource': 'firewall'} @@ -34,29 +35,43 @@ class FakeObjects(object): kwargs = {'vmid': 'vmid'} rpcmgr = 'rpcmgr' drivers = 'drivers' - vm_mgmt_ip = '172.24.4.5' - service_vendor = 'service_vendor' - source_cidrs = ['1.2.3.4/24'] - destination_cidr = 'destination_cidr' - gateway_ip = '1.2.3.4' provider_interface_position = 'provider_interface_position' - url = 'http://172.24.4.5:8888' - url_for_add_inte = "%s/add_rule" % url - url_for_del_inte = "%s/delete_rule" % url - url_for_add_src_route = "%s/add-source-route" % url - url_for_del_src_route = "%s/delete-source-route" % url - url_for_config_fw = "%s/configure-firewall-rule" % url - url_for_update_fw = "%s/update-firewall-rule" % url - url_for_delete_fw = "%s/delete-firewall-rule" % url - data = ('{"stitching_mac": "00:0a:95:9d:68:16",' - '"provider_mac": "00:0a:95:9d:68:16"}') - data_for_interface = ('{"stitching_mac": "00:0a:95:9d:68:16",' - ' "provider_mac": "00:0a:95:9d:68:16"}') - data_for_add_src_route = ('[{"source_cidr": "1.2.3.4/24", ' - '"gateway_ip": "1.2.3.4"}]') - data_for_del_src_route = '[{"source_cidr": "1.2.3.4/24"}]' + data_for_interface = dict(provider_mac="00:0a:95:9d:68:16", + stitching_mac="00:0a:95:9d:68:16") + data_for_add_src_route = {'source_cidr': "1.2.3.4/24", + 'gateway_ip': "1.2.3.4"} + data_for_del_src_route = {'source_cidr': '1.2.3.4/24'} timeout = 120 + def get_url_for_api(self, api): + url = 'http://172.24.4.5:8888/' + api_url_map = { + 'log_forward': 'configure-rsyslog-as-client', + 'add_static_ip': 'add_static_ip', + 'add_inte': 'add_rule', + 'del_inte': 'delete_rule', + 'add_src_route': 
'add-source-route', + 'del_src_route': 'delete-source-route', + 'config_fw': 'configure-firewall-rule', + 'update_fw': 'update-firewall-rule', + 'delete_fw': 'delete-firewall-rule'} + + return url + api_url_map[api] + + def log_forward_data(self): + return dict(server_ip={}, server_port={}, log_level={}) + + def static_ip_data(self): + return dict( + provider_ip="11.0.1.1", + provider_cidr="11.0.1.0/24", + provider_mac="00:0a:95:9d:68:16", + stitching_ip="192.168.0.3", + stitching_cidr="192.168.0.0/28", + stitching_mac="00:0a:95:9d:68:16", + provider_interface_position="2", + stitching_interface_position="3") + def fake_request_data_generic_bulk(self): """ A sample bulk request data for generic APIs @@ -70,7 +85,8 @@ def fake_request_data_generic_bulk(self): "service_type": "firewall", "service_vendor": "vyos", "context": { - "requester": "device_orch" + "requester": "device_orch", + "logging_context": {} } }, "config": [{ @@ -127,7 +143,8 @@ def fake_request_data_fw(self): "service_type": "firewall", "service_vendor": "vyos", "context": { - "requester": "device_orch" + "requester": "device_orch", + "logging_context": {} } }, "config": [{ @@ -155,7 +172,8 @@ def fake_sa_req_list_fw(self): "service_vendor": "vyos", "resource": "firewall", "context": { - "requester": "device_orch" + "requester": "device_orch", + "logging_context": {} }, "resource_type": "firewall" }, @@ -204,7 +222,8 @@ def fake_sa_req_list(self): "service_vendor": "vyos", "resource": "interfaces", "context": { - "requester": "device_orch" + "requester": "device_orch", + "logging_context": {} }, "resource_type": "firewall" }, @@ -227,7 +246,8 @@ def fake_sa_req_list(self): "service_vendor": "vyos", "resource": "routes", "context": { - "requester": "device_orch" + "requester": "device_orch", + "logging_context": {} }, "resource_type": "firewall" }, @@ -265,8 +285,8 @@ def _fake_resource_data(self): 'stitching_cidr': '192.168.0.0/28', 'destination_cidr': '192.168.0.0/28', 'stitching_mac': 
'00:0a:95:9d:68:16', - 'provider_interface_index': 'provider_interface_index', - 'stitching_interface_index': 'stitching_interface_index', + 'provider_interface_index': '2', + 'stitching_interface_index': '3', 'mgmt_ip': '172.24.4.5', 'source_cidrs': ['1.2.3.4/24'], 'gateway_ip': '1.2.3.4' @@ -300,13 +320,13 @@ def _fake_firewall_obj(self): } return firewall -""" Implements a fake event class for firewall for - process framework to use -""" +class FakeEventFirewall(object): + """ Implements a fake event class for firewall for + process framework to use + """ -class FakeEventFirewall(object): def __init__(self): fo = FakeObjects() kwargs = fo._fake_resource_data() @@ -323,13 +343,13 @@ def __init__(self): 'resource_data': kwargs} self.id = 'dummy' -""" Implements a fake event class for generic config for - process framework to use -""" +class FakeEventGenericConfig(object): + """ Implements a fake event class for generic config for + process framework to use + """ -class FakeEventGenericConfig(object): def __init__(self): fo = FakeObjects() kwargs = fo._fake_resource_data() @@ -344,3 +364,13 @@ def __init__(self): 'host': fo.host, 'resource_data': kwargs} self.id = 'dummy' + + +class FakeEventGetNotifications(object): + """ Implements a fake event class for notifications functionality + for the process framework to use + + """ + + def __init__(self): + self.data = {'dummy_data': 'dummy_value'} diff --git a/gbpservice/neutron/tests/unit/nfp/configurator/test_data/lb_test_data.py b/gbpservice/contrib/tests/unit/nfp/configurator/test_data/lb_test_data.py similarity index 86% rename from gbpservice/neutron/tests/unit/nfp/configurator/test_data/lb_test_data.py rename to gbpservice/contrib/tests/unit/nfp/configurator/test_data/lb_test_data.py index 69c2b5617f..49f261e158 100644 --- a/gbpservice/neutron/tests/unit/nfp/configurator/test_data/lb_test_data.py +++ b/gbpservice/contrib/tests/unit/nfp/configurator/test_data/lb_test_data.py @@ -419,7 +419,9 @@ def __init__(self): 
self.data = { 'context': {'notification_data': {}, 'resource': 'context_resource', - 'agent_info': {'service_vendor': '' + 'agent_info': {'service_vendor': '', + 'context': {}, + 'resource': '' } }, 'vip': fo._get_vip_object()[0], @@ -453,58 +455,90 @@ class AssertionData(object): timeout = 30 delete_vip_url = ('http://192.168.100.149:1234/backend/' 'bck:6350c0fd-07f8-46ff-b797-62acd23760de') - create_vip_data = ('{"frnt:7a755739-1bbb-4211-9130-b6c82d9169a5": {' - '"provider_interface_mac": "aa:bb:cc:dd:ee:ff", ' - '"bind": "42.0.0.14:22", ' - '"default_backend": ' - '"bck:6350c0fd-07f8-46ff-b797-62acd23760de", ' - '"option": {"tcplog": true}, ' - '"mode": "tcp"}}') + + create_vip_data = {"frnt:7a755739-1bbb-4211-9130-b6c82d9169a5": + {"option": {"tcplog": True}, + "bind": "42.0.0.14:22", + "mode": "tcp", + "default_backend": + "bck:6350c0fd-07f8-46ff-b797-62acd23760de", + "provider_interface_mac": "aa:bb:cc:dd:ee:ff" + } + } + create_vip_url = 'http://192.168.100.149:1234/frontend' create_vip_resources = 'backend/bck:6350c0fd-07f8-46ff-b797-62acd23760de' - update_vip_data = ('{"provider_interface_mac": "aa:bb:cc:dd:ee:ff", ' - '"bind": "42.0.0.14:22", ' - '"default_backend": ' - '"bck:6350c0fd-07f8-46ff-b797-62acd23760de", ' - '"option": {"tcplog": true}, ' - '"mode": "tcp"}') + + update_vip_data = {"option": {"tcplog": True}, + "bind": "42.0.0.14:22", + "mode": "tcp", + "default_backend": + "bck:6350c0fd-07f8-46ff-b797-62acd23760de", + "provider_interface_mac": "aa:bb:cc:dd:ee:ff" + } + update_vip_url = ('http://192.168.100.149:1234/frontend/frnt:' '7a755739-1bbb-4211-9130-b6c82d9169a5') - update_pool_data = ('{"server": ' - '{"srvr:4910851f-4af7-4592-ad04-08b508c6fa21": ' - '["42.0.0.11:80", "weight 1", ' - '"check inter 10s fall 3"]}, "balance": "roundrobin", ' - '"mode": "tcp", "timeout": {"check": "10s"}, ' - '"option": {}}') + + update_pool_data = {"mode": "tcp", + "balance": "roundrobin", + "option": {}, + "timeout": {"check": "10s"}, + "server": { + 
"srvr:4910851f-4af7-4592-ad04-08b508c6fa21": + ["42.0.0.11:80", "weight 1", + "check inter 10s fall 3"] + }, + } + update_pool_url = ('http://192.168.100.149:1234/backend/bck:' '6350c0fd-07f8-46ff-b797-62acd23760de') - create_member_data = ('{"timeout": {}, "server": ' - '{"srvr:4910851f-4af7-4592-ad04-08b508c6fa21": ' - '["42.0.0.11:80", "weight 1", ' - '"check inter 10s fall 3"], "resource": []}}') + + create_member_data = {"timeout": {}, + "server": + { + "srvr:4910851f-4af7-4592-ad04-08b508c6fa21": + ["42.0.0.11:80", "weight 1", + "check inter 10s fall 3"], + "resource": [] + } + } create_member_url = ('http://192.168.100.149:1234/backend/bck:' '6350c0fd-07f8-46ff-b797-62acd23760de') - delete_member_data = '{"timeout": {}, "server": {"resource": []}}' + + delete_member_data = {"timeout": {}, + "server": {"resource": []} + } + delete_member_url = ('http://192.168.100.149:1234/backend/bck:' '6350c0fd-07f8-46ff-b797-62acd23760de') - update_member_data = ('{"timeout": {}, "server": ' - '{"srvr:4910851f-4af7-4592-ad04-08b508c6fa21": ' - '["42.0.0.11:80", "weight 1", ' - '"check inter 10s fall 3"], "resource": []}}') + + update_member_data = create_member_data update_member_url = ('http://192.168.100.149:1234/backend/bck:' '6350c0fd-07f8-46ff-b797-62acd23760de') - create_hm_data = ('{"timeout": {"check": "10s"}, "server": ' - '{"srvr:4910851f-4af7-4592-ad04-08b508c6fa21": [], ' - '"resource": []}}') + + create_hm_data = {"timeout": {"check": "10s"}, + "server": + { + "srvr:4910851f-4af7-4592-ad04-08b508c6fa21": [], + "resource": [] + } + } + create_hm_url = ('http://192.168.100.149:1234/backend/bck:' '6350c0fd-07f8-46ff-b797-62acd23760de') - delete_hm_data = ('{"timeout": {}, "server": {"srvr:' - '4910851f-4af7-4592-ad04-08b508c6fa21": [], ' - '"resource": []}}') + + delete_hm_data = {"timeout": {}, + "server": + { + "srvr:4910851f-4af7-4592-ad04-08b508c6fa21": [], + "resource": [] + } + } + delete_hm_url = ('http://192.168.100.149:1234/backend/bck:' 
'6350c0fd-07f8-46ff-b797-62acd23760de') - update_hm_data = ('{"timeout": {"check": "10s"}, "server": ' - '{"srvr:4910851f-4af7-4592-ad04-08b508c6fa21": [], ' - '"resource": []}}') + + update_hm_data = create_hm_data update_hm_url = ('http://192.168.100.149:1234/backend/bck:' '6350c0fd-07f8-46ff-b797-62acd23760de') diff --git a/gbpservice/contrib/tests/unit/nfp/configurator/test_data/nfp_service_test_data.py b/gbpservice/contrib/tests/unit/nfp/configurator/test_data/nfp_service_test_data.py new file mode 100644 index 0000000000..42a0b0e19c --- /dev/null +++ b/gbpservice/contrib/tests/unit/nfp/configurator/test_data/nfp_service_test_data.py @@ -0,0 +1,42 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +class FakeObjects(object): + """ Implements fake objects for assertion. 
+ + """ + + sc = 'sc' + conf = 'conf' + context = 'APIcontext' + kwargs = {'vmid': 'vmid'} + rpcmgr = 'rpcmgr' + drivers = 'drivers' + + +class FakeEventNfpService(object): + """ Implements a fake event class for generic config for + process framework to use + + """ + + def __init__(self): + self.data = { + 'context': { + 'resource': 'heat', + 'notification_data': {}, + 'resource_type': 'firewall', + 'service_vendor': 'vyos', + 'context': 'APIcontext'}, + 'resource_data': 'some data'} + self.id = 'dummy' diff --git a/gbpservice/neutron/tests/unit/nfp/configurator/test_data/vpn_test_data.py b/gbpservice/contrib/tests/unit/nfp/configurator/test_data/vpn_test_data.py similarity index 99% rename from gbpservice/neutron/tests/unit/nfp/configurator/test_data/vpn_test_data.py rename to gbpservice/contrib/tests/unit/nfp/configurator/test_data/vpn_test_data.py index 0723997441..4f5dd20119 100644 --- a/gbpservice/neutron/tests/unit/nfp/configurator/test_data/vpn_test_data.py +++ b/gbpservice/contrib/tests/unit/nfp/configurator/test_data/vpn_test_data.py @@ -13,7 +13,6 @@ """ Implements fake objects for assertion. 
""" -import json class VPNTestData(object): @@ -27,6 +26,7 @@ def __init__(self): self.context_device = {'notification_data': {}, 'resource': 'interfaces'} self.sc = 'sc' + self.conf = 'conf' self.msg = 'msg' self.drivers = 'drivers' self.svc = {' ': ' '} @@ -528,9 +528,10 @@ def make_resource_data(self, operation=None, service_type=None): return self._create_vpnservice_obj() def fake_resource_data(self): - """ A sample keyword arguments for configurator + ''' + A sample keyword arguments for configurator Returns: resource_data - """ + ''' resource_data = {'service_type': 'vpn', 'vm_mgmt_ip': '192.168.20.75', 'mgmt_ip': '192.168.20.75', diff --git a/gbpservice/neutron/db/migration/alembic_migrations/versions/54ee8e8d205a_nfp_db.py b/gbpservice/neutron/db/migration/alembic_migrations/versions/54ee8e8d205a_nfp_db.py index 58355825af..74f6023837 100644 --- a/gbpservice/neutron/db/migration/alembic_migrations/versions/54ee8e8d205a_nfp_db.py +++ b/gbpservice/neutron/db/migration/alembic_migrations/versions/54ee8e8d205a_nfp_db.py @@ -13,15 +13,16 @@ # under the License. # -"""nfp_db +""" nfp_db + Revision ID: 54ee8e8d205a -Revises: 3791adbf0045 +Revises: 31b399f08b1c """ # revision identifiers, used by Alembic. 
revision = '54ee8e8d205a' -down_revision = '3791adbf0045' +down_revision = '31b399f08b1c' from alembic import op @@ -154,25 +155,6 @@ def upgrade(): sa.PrimaryKeyConstraint('network_function_instance_id', 'data_port_id') ) - op.create_table( - 'nfp_network_function_device_interfaces', - sa.Column('tenant_id', sa.String(length=255), nullable=True), - sa.Column('id', sa.String(length=36), nullable=False), - sa.Column('plugged_in_port_id', sa.String(length=36), nullable=True), - sa.Column('interface_position', - sa.Integer(), - nullable=True), - sa.Column('mapped_real_port_id', sa.String(length=36), nullable=True), - sa.Column('network_function_device_id', sa.String(length=36), nullable=True), - sa.ForeignKeyConstraint(['plugged_in_port_id'], - ['nfp_port_infos.id'], - ondelete='SET NULL'), - sa.ForeignKeyConstraint(['network_function_device_id'], - ['nfp_network_function_devices.id'], - ondelete='SET NULL'), - sa.PrimaryKeyConstraint('id') - ) - def downgrade(): pass diff --git a/gbpservice/nfp/service_plugins/loadbalancer/__init__.py b/gbpservice/neutron/tests/unit/nfp/base_configurator/controllers/__init__.py similarity index 100% rename from gbpservice/nfp/service_plugins/loadbalancer/__init__.py rename to gbpservice/neutron/tests/unit/nfp/base_configurator/controllers/__init__.py diff --git a/gbpservice/neutron/tests/unit/nfp/base_configurator/api/v1/controllers/test_controller.py b/gbpservice/neutron/tests/unit/nfp/base_configurator/controllers/test_controller.py similarity index 97% rename from gbpservice/neutron/tests/unit/nfp/base_configurator/api/v1/controllers/test_controller.py rename to gbpservice/neutron/tests/unit/nfp/base_configurator/controllers/test_controller.py index 90b2fa18e9..e99b6584bf 100644 --- a/gbpservice/neutron/tests/unit/nfp/base_configurator/api/v1/controllers/test_controller.py +++ b/gbpservice/neutron/tests/unit/nfp/base_configurator/controllers/test_controller.py @@ -19,7 +19,11 @@ import webtest import zlib -from 
gbpservice.nfp.base_configurator.api import root_controller +from gbpservice.nfp.pecan import constants + +setattr(pecan, 'mode', constants.base) + +from gbpservice.nfp.pecan.api import root_controller ERROR = 'error' UNHANDLED = 'unhandled' diff --git a/gbpservice/neutron/tests/unit/nfp/configurator/agents/test_vpn_agent.py b/gbpservice/neutron/tests/unit/nfp/configurator/agents/test_vpn_agent.py deleted file mode 100644 index 4861abb8c4..0000000000 --- a/gbpservice/neutron/tests/unit/nfp/configurator/agents/test_vpn_agent.py +++ /dev/null @@ -1,54 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -from gbpservice.neutron.tests.unit.nfp.configurator.test_data import \ - vpn_test_data -from gbpservice.nfp.configurator.agents import vpn -from gbpservice.nfp.configurator.drivers.vpn.vyos import vyos_vpn_driver - -import mock -import unittest - -""" -Implements test cases for RPC manager methods of vpn agent -""" - - -class VPNaasEventHandlerTestCase(unittest.TestCase): - def __init__(self, *args, **kwargs): - super(VPNaasEventHandlerTestCase, self).__init__(*args, **kwargs) - self.conf = 'conf' - self.dict_obj = vpn_test_data.VPNTestData() - self.handler = vpn.VPNaasEventHandler(self.dict_obj.sc, - self.dict_obj.drivers) - self.ev = vpn_test_data.FakeEvent() - self.driver = vyos_vpn_driver.VpnaasIpsecDriver(self.conf) - - def test_handle_event(self): - ''' - Test to handle the vpn agent's vpnservice_updated method to - handle various vpn operations - - ''' - with mock.patch.object(self.handler, - '_get_driver', - return_value=self.dict_obj.drivers),\ - mock.patch.object(self.driver, 'vpnservice_updated') as ( - mock_vpnservice_updated): - self.handler._vpnservice_updated(self.ev, self.driver) - mock_vpnservice_updated.assert_called_with(self.ev.data['context'], - self.ev.data[ - 'resource_data']) - -if __name__ == '__main__': - unittest.main() diff --git a/gbpservice/neutron/tests/unit/nfp/configurator/modules/test_configurator.py b/gbpservice/neutron/tests/unit/nfp/configurator/modules/test_configurator.py deleted file mode 100644 index b2a2503ba4..0000000000 --- a/gbpservice/neutron/tests/unit/nfp/configurator/modules/test_configurator.py +++ /dev/null @@ -1,341 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -import unittest - -from oslo_log import log as logging - -from gbpservice.neutron.tests.unit.nfp.configurator.test_data import ( - fw_test_data as fo) -from gbpservice.nfp.configurator.agents import firewall as fw -from gbpservice.nfp.configurator.agents import generic_config as gc -from gbpservice.nfp.configurator.lib import demuxer as demuxer_lib -from gbpservice.nfp.configurator.modules import configurator as cfgr - -LOG = logging.getLogger(__name__) - -STATUS_ACTIVE = "ACTIVE" - -""" Tests RPC manager class of configurator - -""" - - -class ConfiguratorRpcManagerTestCase(unittest.TestCase): - - def __init__(self, *args, **kwargs): - super(ConfiguratorRpcManagerTestCase, self).__init__(*args, **kwargs) - self.fo = fo.FakeObjects() - - @mock.patch(__name__ + '.fo.FakeObjects.conf') - @mock.patch(__name__ + '.fo.FakeObjects.sc') - def _get_ConfiguratorRpcManager_object(self, sc, conf): - """ Retrieves RPC manager object of configurator. - - :param sc: mocked service controller object of process model framework - :param conf: mocked OSLO configuration file - - Returns: object of configurator's RPC manager. - - """ - - cm = cfgr.ConfiguratorModule(sc) - demuxer = demuxer_lib.ServiceAgentDemuxer() - rpc_mgr = cfgr.ConfiguratorRpcManager(sc, cm, conf, demuxer) - return sc, conf, rpc_mgr - - def _get_GenericConfigRpcManager_object(self, conf, sc): - """ Retrieves RPC manager object of generic config agent. 
- - :param sc: mocked service controller object of process model framework - :param conf: mocked OSLO configuration file - - Returns: object of generic config's RPC manager - and service controller. - - """ - - agent = gc.GenericConfigRpcManager(sc, conf) - return agent, sc - - @mock.patch(__name__ + '.fo.FakeObjects.drivers') - def _get_GenericConfigEventHandler_object(self, sc, rpcmgr, drivers): - """ Retrieves event handler object of generic configuration. - - :param sc: mocked service controller object of process model framework - :param rpcmgr: object of configurator's RPC manager - :param drivers: list of driver objects for firewall agent - - Returns: object of generic config's event handler - - """ - - agent = gc.GenericConfigEventHandler(sc, drivers, rpcmgr) - return agent - - def _get_FWaasRpcManager_object(self, conf, sc): - """ Retrieves RPC manager object of firewall agent. - - :param sc: mocked service controller object of process model framework - :param conf: mocked OSLO configuration file - - Returns: object of firewall's RPC manager and service controller - - """ - - agent = fw.FWaasRpcManager(sc, conf) - return agent, sc - - def _test_network_device_config(self, operation, method, batch=False): - """ Tests generic config APIs - - :param operation: create/delete - :param method: CONFIGURE_ROUTES/CLEAR_ROUTES/ - CONFIGURE_INTERFACES/CLEAR_INTERFACES - :param batch: True or False. 
Indicates if the - request is a batch request - - Returns: none - - """ - - sc, conf, rpc_mgr = self._get_ConfiguratorRpcManager_object() - agent, sc = self._get_GenericConfigRpcManager_object(conf, sc) - - request_data = {'batch': { - 'request_data_actual': ( - self.fo.fake_request_data_generic_bulk()), - 'request_data_expected': ( - self.fo.fake_request_data_generic_bulk())}, - 'single': { - 'request_data_actual': ( - (self.fo.fake_request_data_generic_single( - routes=True) - if 'ROUTES' in method - else self.fo.fake_request_data_generic_single())), - 'request_data_expected': ( - (self.fo.fake_request_data_generic_single( - routes=True) - if 'ROUTES' in method - else self.fo.fake_request_data_generic_single()))} - } - if batch: - request_data_actual, request_data_expected = ( - request_data['batch'].values()) - else: - request_data_actual, request_data_expected = ( - request_data['single'].values()) - - with mock.patch.object( - sc, 'new_event', return_value='foo') as mock_sc_event, \ - mock.patch.object(sc, 'post_event') as mock_sc_rpc_event, \ - mock.patch.object(rpc_mgr, - '_get_service_agent_instance', - return_value=agent): - - if operation == 'create': - rpc_mgr.create_network_function_device_config( - self.fo.context, request_data_actual) - elif operation == 'delete': - rpc_mgr.delete_network_function_device_config( - self.fo.context, request_data_actual) - - context = request_data_expected['info']['context'] - - agent_info = {} - agent_info.update( - {'resource': request_data_expected['config'][0][ - 'resource'], - 'resource_type': request_data_expected['info'][ - 'service_type'], - 'service_vendor': request_data_expected['info'][ - 'service_vendor'], - 'context': context, - 'notification_data': {} - }) - resource_data = request_data_expected['config'][0]['resource_data'] - if batch: - sa_req_list = self.fo.fake_sa_req_list() - if operation == 'delete': - sa_req_list[0]['method'] = 'clear_interfaces' - sa_req_list[1]['method'] = 'clear_routes' - 
args_dict = { - 'sa_req_list': sa_req_list, - 'notification_data': {} - } - else: - args_dict = {'context': agent_info, - 'resource_data': resource_data} - mock_sc_event.assert_called_with(id=method, - data=args_dict, key=None) - mock_sc_rpc_event.assert_called_with('foo') - - def _test_fw_event_creation(self, operation): - """ Tests firewall APIs - - :param operation: CREATE_FIREWALL/UPDATE_FIREWALL/DELETE_FIREWALL - - Returns: none - - """ - - sc, conf, rpc_mgr = self._get_ConfiguratorRpcManager_object() - agent, sc = self._get_FWaasRpcManager_object(conf, sc) - arg_dict = {'context': self.fo.fw_context, - 'firewall': self.fo._fake_firewall_obj(), - 'host': self.fo.host} - method = {'CREATE_FIREWALL': 'create_network_function_config', - 'UPDATE_FIREWALL': 'update_network_function_config', - 'DELETE_FIREWALL': 'delete_network_function_config'} - request_data = self.fo.fake_request_data_fw() - with mock.patch.object(sc, 'new_event', return_value='foo') as ( - mock_sc_event), \ - mock.patch.object(sc, 'post_event') as mock_sc_rpc_event, \ - mock.patch.object(rpc_mgr, - '_get_service_agent_instance', - return_value=agent): - getattr(rpc_mgr, method[operation])(self.fo.fw_context, - request_data) - - mock_sc_event.assert_called_with(id=operation, - data=arg_dict, key=None) - mock_sc_rpc_event.assert_called_with('foo') - - def _test_notifications(self): - """ Tests response path notification APIs - - Returns: none - - """ - - sc, conf, rpc_mgr = self._get_ConfiguratorRpcManager_object() - agent = self._get_GenericConfigEventHandler_object(sc, rpc_mgr) - - data = "PUT ME IN THE QUEUE!" 
- with mock.patch.object(sc, 'new_event', return_value='foo') as ( - mock_new_event),\ - mock.patch.object(sc, 'stash_event') as mock_poll_event: - - agent.notify._notification(data) - - mock_new_event.assert_called_with(id='STASH_EVENT', - key='STASH_EVENT', - data=data) - mock_poll_event.assert_called_with('foo') - - def test_configure_routes_configurator_api(self): - """ Implements test case for configure routes API - - Returns: none - - """ - - method = "CONFIGURE_ROUTES" - operation = 'create' - self._test_network_device_config(operation, method) - - def test_clear_routes_configurator_api(self): - """ Implements test case for clear routes API - - Returns: none - - """ - - method = "CLEAR_ROUTES" - operation = 'delete' - self._test_network_device_config(operation, method) - - def test_configure_interfaces_configurator_api(self): - """ Implements test case for configure interfaces API - - Returns: none - - """ - - method = "CONFIGURE_INTERFACES" - operation = 'create' - self._test_network_device_config(operation, method) - - def test_clear_interfaces_configurator_api(self): - """ Implements test case for clear interfaces API - - Returns: none - - """ - - method = "CLEAR_INTERFACES" - operation = 'delete' - self._test_network_device_config(operation, method) - - def test_configure_bulk_configurator_api(self): - """ Implements test case for bulk configure request API - - Returns: none - - """ - - method = "PROCESS_BATCH" - operation = 'create' - self._test_network_device_config(operation, method, True) - - def test_clear_bulk_configurator_api(self): - """ Implements test case for bulk clear request API - - Returns: none - - """ - - method = "PROCESS_BATCH" - operation = 'delete' - self._test_network_device_config(operation, method, True) - - def test_create_firewall_configurator_api(self): - """ Implements test case for create firewall API - - Returns: none - - """ - - self._test_fw_event_creation('CREATE_FIREWALL') - - def 
test_update_firewall_configurator_api(self): - """ Implements test case for update firewall API - - Returns: none - - """ - - self._test_fw_event_creation('UPDATE_FIREWALL') - - def test_delete_firewall_configurator_api(self): - """ Implements test case for delete firewall API - - Returns: none - - """ - - self._test_fw_event_creation('DELETE_FIREWALL') - - def test_get_notifications_generic_configurator_api(self): - """ Implements test case for get notifications API - of configurator - - Returns: none - - """ - - self._test_notifications() - - -if __name__ == '__main__': - unittest.main() diff --git a/gbpservice/neutron/tests/unit/nfp/orchestrator/test_heat_driver.py b/gbpservice/neutron/tests/unit/nfp/orchestrator/test_heat_driver.py index 6ee96f25a1..4ed9785cba 100644 --- a/gbpservice/neutron/tests/unit/nfp/orchestrator/test_heat_driver.py +++ b/gbpservice/neutron/tests/unit/nfp/orchestrator/test_heat_driver.py @@ -19,8 +19,9 @@ from gbpclient.v2_0 import client as gbp_client from gbpservice.neutron.tests.unit.nfp.orchestrator import mock_dicts -from gbpservice.nfp.orchestrator.config_drivers\ - import heat_client as heat_client +from gbpservice.nfp.core import log as nfp_logging +from gbpservice.nfp.orchestrator.config_drivers import ( + heat_client as heat_client) from gbpservice.nfp.orchestrator.config_drivers import heat_driver from neutronclient.v2_0 import client as neutron_client @@ -118,9 +119,11 @@ def test_get_heat_client(self, mock_obj): keystone_client.auth_token = True self.heat_driver_obj._assign_admin_user_to_project = mock.Mock( return_value=None) - resource_owner_tenant_id = '8ae6701128994ab281dde6b92207bb19' + nfp_logging.get_logging_context = mock.Mock( + return_value={'auth_token': '7fd6701128994ab281ccb6b92207bb15'}) + tenant_id = '8ae6701128994ab281dde6b92207bb19' heat_client_obj = self.heat_driver_obj._get_heat_client( - resource_owner_tenant_id, tenant_id=None) + tenant_id) self.assertIsNotNone(heat_client_obj) 
@mock.patch.object(identity_client, "Client") @@ -315,6 +318,8 @@ def test_delete_config(self, mock_obj, heat_get_mock_obj, heat_get_mock_obj.return_value = MockStackObject('DELETE_COMPLETE') self.heat_driver_obj._assign_admin_user_to_project = mock.Mock( return_value=None) + nfp_logging.get_logging_context = mock.Mock( + return_value={'auth_token': '7fd6701128994ab281ccb6b92207bb15'}) instance = mock_obj.return_value instance.auth_token = True @@ -329,6 +334,8 @@ def test_is_config_complete(self, mock_obj, heat_get_mock_obj): tenant_id = '8ae6701128994ab281dde6b92207bb19' self.heat_driver_obj._assign_admin_user_to_project = mock.Mock( return_value=None) + nfp_logging.get_logging_context = mock.Mock( + return_value={'auth_token': '7fd6701128994ab281ccb6b92207bb15'}) self.heat_driver_obj.loadbalancer_post_stack_create = mock.Mock( return_value=None) heat_get_mock_obj.return_value = MockStackObject( @@ -348,6 +355,8 @@ def test_is_config_delete_complete(self, identity_mock_obj, tenant_id = '8ae6701128994ab281dde6b92207bb19' self.heat_driver_obj._assign_admin_user_to_project = mock.Mock( return_value=None) + nfp_logging.get_logging_context = mock.Mock( + return_value={'auth_token': '7fd6701128994ab281ccb6b92207bb15'}) heat_get_mock_obj.return_value = MockStackObject( 'DELETE_COMPLETE') identity_mock_obj.return_value.auth_token = "1234" @@ -487,6 +496,8 @@ def test_update( 'CREATE_COMPLETE') self.heat_driver_obj._assign_admin_user_to_project = mock.Mock( return_value=None) + nfp_logging.get_logging_context = mock.Mock( + return_value={'auth_token': '7fd6701128994ab281ccb6b92207bb15'}) auth_token = 'dasddasda' resource_owner_tenant_id = '8ae6701128994ab281dde6b92207bb19' provider = self.mock_dict.provider_ptg @@ -537,6 +548,8 @@ def test_handle_consumer_ptg_operations( 'CREATE_COMPLETE') self.heat_driver_obj._assign_admin_user_to_project = mock.Mock( return_value=None) + nfp_logging.get_logging_context = mock.Mock( + return_value={'auth_token': 
'7fd6701128994ab281ccb6b92207bb15'}) service_details = {} service_details['service_profile'] = self.mock_dict.service_profile @@ -576,6 +589,8 @@ def test_handle_policy_target_operations( 'CREATE_COMPLETE') self.heat_driver_obj._assign_admin_user_to_project = mock.Mock( return_value=None) + nfp_logging.get_logging_context = mock.Mock( + return_value={'auth_token': '7fd6701128994ab281ccb6b92207bb15'}) service_details = {} service_details['service_profile'] = self.mock_dict.lb_service_profile @@ -616,6 +631,8 @@ def test_apply_config( 'CREATE_COMPLETE') self.heat_driver_obj._assign_admin_user_to_project = mock.Mock( return_value=None) + nfp_logging.get_logging_context = mock.Mock( + return_value={'auth_token': '7fd6701128994ab281ccb6b92207bb15'}) service_details = {} service_details['service_profile'] = self.mock_dict.service_profile diff --git a/gbpservice/nfp/base_configurator/api/setup.py b/gbpservice/nfp/base_configurator/api/setup.py deleted file mode 100644 index e6042b44f2..0000000000 --- a/gbpservice/nfp/base_configurator/api/setup.py +++ /dev/null @@ -1,34 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -# -*- coding: utf-8 -*- -try: - import setuptools -except ImportError: - import ez_setup - ez_setup.use_setuptools() - import setuptools - -setuptools.setup( - name='api', - version='0.1', - description='', - author='', - author_email='', - install_requires=[ - "pecan", - ], - test_suite='api', - zip_safe=False, - include_package_data=True, - packages=setuptools.find_packages(exclude=['ez_setup']) -) diff --git a/gbpservice/nfp/base_configurator/api/v1/controllers/__init__.py b/gbpservice/nfp/base_configurator/controllers/__init__.py similarity index 73% rename from gbpservice/nfp/base_configurator/api/v1/controllers/__init__.py rename to gbpservice/nfp/base_configurator/controllers/__init__.py index 2f9fe51b03..cded74e180 100644 --- a/gbpservice/nfp/base_configurator/api/v1/controllers/__init__.py +++ b/gbpservice/nfp/base_configurator/controllers/__init__.py @@ -12,20 +12,19 @@ import pecan -import controller +from gbpservice.nfp.base_configurator.controllers import controller -"""This class forwards HTTP request to controller class. -This class create an object of Controller class with appropriate -parameter according to the path of HTTP request. According to the -parameter passed to Controller class it sends an RPC call/cast to -configurator. - -""" +class ControllerResolver(object): + """This class forwards HTTP request to controller class. -class ControllerResolver(object): + This class create an object of Controller class with appropriate + parameter according to the path of HTTP request. According to the + parameter passed to Controller class it sends an RPC call/cast to + configurator. + """ create_network_function_device_config = controller.Controller( "create_network_function_device_config") delete_network_function_device_config = controller.Controller( @@ -41,22 +40,19 @@ class ControllerResolver(object): get_notifications = controller.Controller("get_notifications") -""" This class forwards HTTP requests starting with /v1/nfp. 
- -All HTTP requests with path starting from /v1 -land here. This class forward request with path starting from /v1/nfp -to ControllerResolver. - -""" +class V1Controller(object): + """ This class forwards HTTP requests starting with /v1/nfp. + All HTTP requests with path starting from /v1 + land here. This class forward request with path starting from /v1/nfp + to ControllerResolver. -class V1Controller(object): + """ nfp = ControllerResolver() @pecan.expose() def get(self): - # TODO(blogan): decide what exactly should be here, if anything return {'versions': [{'status': 'CURRENT', 'updated': '2014-12-11T00:00:00Z', 'id': 'v1'}]} diff --git a/gbpservice/nfp/base_configurator/api/v1/controllers/controller.py b/gbpservice/nfp/base_configurator/controllers/controller.py similarity index 72% rename from gbpservice/nfp/base_configurator/api/v1/controllers/controller.py rename to gbpservice/nfp/base_configurator/controllers/controller.py index 949ca14bdc..d472e625e9 100644 --- a/gbpservice/nfp/base_configurator/api/v1/controllers/controller.py +++ b/gbpservice/nfp/base_configurator/controllers/controller.py @@ -18,7 +18,7 @@ import subprocess import time -from gbpservice.nfp.base_configurator.api.base_controller import BaseController +from gbpservice.nfp.pecan import base_controller LOG = logging.getLogger(__name__) TOPIC = 'configurator' @@ -26,20 +26,20 @@ SUCCESS_RESULTS = ['unhandled', 'success'] FAILURE = 'failure' -"""Implements all the APIs Invoked by HTTP requests. - -Implements following HTTP methods. - -get - -post - -""" notifications = [] cache_ips = set() -class Controller(BaseController): +class Controller(base_controller.BaseController): + + """Implements all the APIs Invoked by HTTP requests. + + Implements following HTTP methods. 
+ -get + -post + """ def __init__(self, method_name): try: self.method_name = method_name @@ -50,7 +50,7 @@ def __init__(self, method_name): str(err).capitalize()) LOG.error(msg) self.vm_port = '8080' - self.max_retries = 24 + self.max_retries = 60 def _push_notification(self, context, result, config_data, service_type): global notifications @@ -188,64 +188,6 @@ def post(self, **body): error_data = self._format_description(msg) return jsonutils.dumps(error_data) - @pecan.expose(method='PUT', content_type='application/json') - def put(self, **body): - """Method of REST server to handle all the put requests. - - This method sends an RPC cast to configurator according to the - HTTP request. - - :param body: This method excepts dictionary as a parameter in HTTP - request and send this dictionary to configurator with RPC cast. - - Returns: None - - """ - - try: - global cache_ips - global notifications - body = None - if pecan.request.is_body_readable: - body = pecan.request.json_body - - # Assuming config list will have only one element - config_data = body['config'][0] - context = body['info']['context'] - service_type = body['info']['service_type'] - resource = config_data['resource'] - - if 'device_ip' in context: - msg = ("PUTTING DATA TO VM :: %s" % body) - LOG.info(msg) - device_ip = context['device_ip'] - ip = str(device_ip) - is_vm_reachable = self._verify_vm_reachability(ip, - self.vm_port) - if is_vm_reachable: - requests.post( - 'http://' + ip + ':' + self.vm_port + '/v1/nfp/' + - self.method_name, data=jsonutils.dumps(body)) - else: - raise Exception('VM is not reachable') - cache_ips.add(device_ip) - else: - if (resource in NFP_SERVICE_LIST): - result = "unhandled" - self._push_notification(context, - result, config_data, service_type) - else: - result = "error" - self._push_notification(context, - result, config_data, service_type) - except Exception as err: - pecan.response.status = 400 - msg = ("Failed to serve HTTP post request %s %s." 
- % (self.method_name, str(err).capitalize())) - LOG.error(msg) - error_data = self._format_description(msg) - return jsonutils.dumps(error_data) - def _format_description(self, msg): """This methgod formats error description. diff --git a/gbpservice/nfp/bin/nfp_configurator b/gbpservice/nfp/bin/nfp_configurator deleted file mode 100755 index 5a8fee0a25..0000000000 --- a/gbpservice/nfp/bin/nfp_configurator +++ /dev/null @@ -1,113 +0,0 @@ -#!/bin/sh - -PATH=/sbin:/usr/sbin:/bin:/usr/bin -DESC="NFP CONFIGURATOR agent" -PROJECT_NAME=nfp -NAME=${PROJECT_NAME}_configurator -DAEMON_ARGS="--config-file=/etc/nfp_configurator.ini" -#!/bin/sh -if [ -z "${DAEMON}" ] ; then - DAEMON=/usr/bin/${PROJECT_NAME} -fi -PIDFILE=/var/run/${PROJECT_NAME}/${NAME}.pid -if [ -z "${SCRIPTNAME}" ] ; then - SCRIPTNAME=/etc/init.d/${NAME} -fi -if [ -z "${SYSTEM_USER}" ] ; then - SYSTEM_USER=root -fi -if [ -z "${SYSTEM_USER}" ] ; then - SYSTEM_GROUP=root -fi -if [ "${SYSTEM_USER}" != "root" ] ; then - STARTDAEMON_CHUID="--chuid ${SYSTEM_USER}:${SYSTEM_GROUP}" -fi -if [ -z "${CONFIG_FILE}" ] ; then - CONFIG_FILE=/etc/init/nfp_configurator.conf -fi -LOGFILE=/var/log/${PROJECT_NAME}/${NAME}.log -DAEMON_ARGS="${DAEMON_ARGS} --config-file=${CONFIG_FILE}" - -# If ran as root, create /var/lock/X, /var/run/X, /var/lib/X and /var/log/X as needed -if [ `whoami` = "root" ] ; then - for i in lock run log; do - mkdir -p /var/$i/${PROJECT_NAME} - chown ${SYSTEM_USER} /var/$i/${PROJECT_NAME} - done -fi - -# This defines init_is_upstart which we use later on (+ more...) -. /lib/lsb/init-functions - -# Manage log options: logfile and/or syslog, depending on user's choosing -#[ -r /etc/default/$NAME ] && . 
/etc/default/$NAME -DAEMON_ARGS="$DAEMON_ARGS --log-file=$LOGFILE" - - -do_start() { - start-stop-daemon --start --quiet --background ${STARTDAEMON_CHUID} --make-pidfile --pidfile ${PIDFILE} --startas $DAEMON \ - --test > /dev/null || return 1 - start-stop-daemon --start --quiet --background ${STARTDAEMON_CHUID} --make-pidfile --pidfile ${PIDFILE} --startas $DAEMON \ - -- $DAEMON_ARGS || return 2 -} - -do_stop() { - start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE - RETVAL=$? - rm -f $PIDFILE - return "$RETVAL" -} - -do_systemd_start() { - exec $DAEMON $DAEMON_ARGS -} - -case "$1" in -start) - init_is_upstart > /dev/null 2>&1 && exit 1 - log_daemon_msg "Starting $DESC" "$NAME" - do_start - case $? in - 0|1) log_end_msg 0 ;; - 2) log_end_msg 1 ;; - esac -;; -stop) - init_is_upstart > /dev/null 2>&1 && exit 0 - log_daemon_msg "Stopping $DESC" "$NAME" - do_stop - case $? in - 0|1) log_end_msg 0 ;; - 2) log_end_msg 1 ;; - esac -;; -status) - status_of_proc "$DAEMON" "$NAME" && exit 0 || exit $? -;; -systemd-start) - do_systemd_start -;; -restart|force-reload) - init_is_upstart > /dev/null 2>&1 && exit 1 - log_daemon_msg "Restarting $DESC" "$NAME" - do_stop - case $? in - 0|1) - do_start - case $? 
in - 0) log_end_msg 0 ;; - 1) log_end_msg 1 ;; # Old process is still running - *) log_end_msg 1 ;; # Failed to start - esac - ;; - *) log_end_msg 1 ;; # Failed to stop - esac -;; -*) - echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload|systemd-start}" >&2 - exit 3 -;; -esac - -exit 0 - diff --git a/gbpservice/nfp/bin/nfp_configurator.conf b/gbpservice/nfp/bin/nfp_configurator.conf deleted file mode 100755 index 0730377174..0000000000 --- a/gbpservice/nfp/bin/nfp_configurator.conf +++ /dev/null @@ -1,23 +0,0 @@ -description "NFP Configurator" - -start on runlevel [2345] -stop on runlevel [!2345] - -respawn - -chdir /var/run - -pre-start script - mkdir -p /var/run/nfp - chown root:root /var/run/nfp - mkdir -p /var/log/nfp - chown root:root /var/log/nfp - touch /var/log/nfp/nfp_configurator.log - chown root:root /var/log/nfp/nfp_configurator.log - chmod +x /var/log/nfp/nfp_configurator.log -end script - -exec start-stop-daemon --start --exec /usr/bin/nfp -- \ - --config-file=/etc/nfp_configurator.ini \ - --log-file=/var/log/nfp/nfp_configurator.log - diff --git a/gbpservice/nfp/bin/nfp_configurator.ini b/gbpservice/nfp/bin/nfp_configurator.ini deleted file mode 100644 index 52279c94a4..0000000000 --- a/gbpservice/nfp/bin/nfp_configurator.ini +++ /dev/null @@ -1,30 +0,0 @@ -[DEFAULT] -policy_file=/etc/policy.json -debug=False -rabbit_password=guest -rabbit_userid=guest -rabbit_hosts=127.0.0.1 -# #Example 192.168.2.107:5672 -rabbit_port=5672 -rabbit_host= -kombu_reconnect_delay=1.0 -control_exchange = openstack - -rabbit_use_ssl=False - -rabbit_virtual_host=/ -workers=2 -nfp_modules_path=gbpservice.nfp.configurator.modules -reportstate_interval=10 -periodic_interval=2 - -log_forward_ip_address= -log_forward_port=514 -log_level=debug - - -#logging_exception_prefix = %(color)s%(asctime)s.%(msecs)03d TRACE %(name)s ^[[01;35m%(instance)s^[[00m -#logging_debug_format_suffix = ^[[00;33mfrom (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d^[[00m 
-#logging_default_format_string = %(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [^[[00;36m-%(color)s] ^[[01;35m%(instance)s%(color)s%(message)s^[[00m -#logging_context_format_string = %(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [^[[01;36m%(request_id)s ^[[00;36m%(user_name)s %(project_id)s%(color)s] ^[[01;35m%(instance)s%(color)s%(message)s^[[00m - diff --git a/gbpservice/nfp/bin/startup/redhat/nfp_pecan.service b/gbpservice/nfp/bin/startup/redhat/nfp_pecan.service index e1d7e6237d..446466fb4b 100644 --- a/gbpservice/nfp/bin/startup/redhat/nfp_pecan.service +++ b/gbpservice/nfp/bin/startup/redhat/nfp_pecan.service @@ -3,7 +3,7 @@ Description=One Convergence NFP Rest Server After=syslog.target network.target [Service] -ExecStart=sudo pecan serve /usr/local/lib/python2.7/dist-packages/gbpservice/nfp/configurator/api/config.py +ExecStart=sudo pecan serve /usr/local/lib/python2.7/dist-packages/gbpservice/contrib/nfp/configurator/api/config.py Restart=on-abort [Install] diff --git a/gbpservice/nfp/config/mode_shift.conf b/gbpservice/nfp/config/mode_shift.conf index 6721bc7df4..94b8f0dc30 100644 --- a/gbpservice/nfp/config/mode_shift.conf +++ b/gbpservice/nfp/config/mode_shift.conf @@ -7,6 +7,8 @@ FROM=advanced TO=enterprise +DEVSTACK_SRC_DIR= + #----------------------------------# # enterprise mode specific options # #----------------------------------# @@ -22,3 +24,4 @@ DOCKER_IMAGES_URL=http://192.168.100.50/docker_images/ # Optional parameter AsavQcow2Image= +PaloAltoQcow2Image= diff --git a/gbpservice/nfp/configurator/api/base_controller.py b/gbpservice/nfp/configurator/api/base_controller.py deleted file mode 100644 index 1a434c88d5..0000000000 --- a/gbpservice/nfp/configurator/api/base_controller.py +++ /dev/null @@ -1,49 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_log import log as logging -import oslo_serialization.jsonutils as jsonutils - -from pecan.hooks import HookController -from pecan.hooks import PecanHook -from pecan import rest -import zlib - -LOG = logging.getLogger(__name__) - - -class ZipperHook(PecanHook): - - def before(self, state): - if state.request.method.upper() != 'GET': - try: - zippedBody = state.request.body - body = zlib.decompress(zippedBody) - body = jsonutils.loads(body) - state.request.json_body = body - state.request.content_type = "application/json" - except Exception as e: - msg = ("Failed to process data ,Reason: %s" % (e)) - LOG.error(msg) - - def after(self, state): - data = state.response.body - state.response.body = zlib.compress(data) - state.response.content_type = "application/octet-stream" - - -class BaseController(rest.RestController, HookController): - """This is root controller that forward the request to __init__.py - file inside controller folder inside v1 - - """ - __hooks__ = [ZipperHook()] diff --git a/gbpservice/nfp/configurator/api/config.py b/gbpservice/nfp/configurator/api/config.py deleted file mode 100644 index 28498d2070..0000000000 --- a/gbpservice/nfp/configurator/api/config.py +++ /dev/null @@ -1,87 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# Server Specific Configurations -server = { - 'port': '8080', - 'host': '0.0.0.0' -} - -# Pecan Application Configurations -app = { - 'root': 'root_controller.RootController', - 'modules': ['v1'], - 'debug': True, - 'errors': { - 404: '/error/404', - '__force_dict__': True - } -} - -logging = { - 'root': {'level': 'INFO', 'handlers': ['console', 'logfile']}, - 'loggers': { - 'pecanlog': {'level': 'INFO', - 'handlers': ['console', 'logfile'], - 'propagate': False}, - 'pecan': {'level': 'INFO', - 'handlers': ['console', 'logfile'], - 'propagate': False}, - 'py.warnings': {'handlers': ['console', 'logfile']}, - '__force_dict__': True - }, - 'handlers': { - 'console': { - 'level': 'INFO', - 'class': 'logging.StreamHandler', - 'formatter': 'color' - }, - 'logfile': { - 'class': 'logging.FileHandler', - 'filename': '/var/log/nfp/nfp_pecan.log', - 'level': 'INFO' - } - }, - 'formatters': { - 'simple': { - 'format': ('%(asctime)s %(levelname)-5.5s [%(name)s]' - '[%(threadName)s] %(message)s') - }, - 'color': { - '()': 'pecan.log.ColorFormatter', - 'format': ('%(asctime)s [%(padded_color_levelname)s] [%(name)s]' - '[%(threadName)s] %(message)s'), - '__force_dict__': True - } - } -} - -cloud_services = [ - {'service_name': 'configurator', - 'topic': 'configurator', - 'reporting_interval': '10', # in seconds - 'apis': ['CONFIGURATION'] - }, - - {'service_name': 'visibility', - 'topic': 'visibility', - 'reporting_interval': '10', # in seconds - 'apis': ['VISIBILITY'] - }, -] - -# Custom Configurations must be in Python dictionary format:: -# -# foo = {'bar':'baz'} 
-# -# All configurations are accessible at:: -# pecan.conf diff --git a/gbpservice/nfp/configurator/drivers/firewall/vyos/vyos_fw_driver.py b/gbpservice/nfp/configurator/drivers/firewall/vyos/vyos_fw_driver.py deleted file mode 100644 index 1df9aee6a4..0000000000 --- a/gbpservice/nfp/configurator/drivers/firewall/vyos/vyos_fw_driver.py +++ /dev/null @@ -1,702 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import ast -import requests - -from gbpservice.nfp.core import log as nfp_logging - -from oslo_serialization import jsonutils - -from gbpservice.nfp.configurator.drivers.base import base_driver -from gbpservice.nfp.configurator.drivers.firewall.vyos import ( - vyos_fw_constants as const) -from gbpservice.nfp.configurator.lib import constants as common_const -from gbpservice.nfp.configurator.lib import fw_constants as fw_const - -LOG = nfp_logging.getLogger(__name__) - - -""" Firewall generic configuration driver for handling device -configuration requests. - -""" - - -class FwGenericConfigDriver(base_driver.BaseDriver): - """ - Driver class for implementing firewall configuration - requests from Orchestrator. - """ - - def __init__(self): - pass - - def _configure_static_ips(self, resource_data): - """ Configure static IPs for provider and stitching interfaces - of service VM. - - Issues REST call to service VM for configuration of static IPs. 
- - :param resource_data: a dictionary of firewall rules and objects - send by neutron plugin - - Returns: SUCCESS/Failure message with reason. - - """ - - static_ips_info = dict( - provider_ip=resource_data.get('provider_ip'), - provider_cidr=resource_data.get('provider_cidr'), - provider_mac=resource_data.get('provider_mac'), - stitching_ip=resource_data.get('stitching_ip'), - stitching_cidr=resource_data.get('stitching_cidr'), - stitching_mac=resource_data.get('stitching_mac'), - provider_interface_position=resource_data.get( - 'provider_interface_index'), - stitching_interface_position=resource_data.get( - 'stitching_interface_index')) - mgmt_ip = resource_data['mgmt_ip'] - - url = const.request_url % (mgmt_ip, - self.port, - 'add_static_ip') - data = jsonutils.dumps(static_ips_info) - - msg = ("Initiating POST request to add static IPs for primary " - "service at: %r" % mgmt_ip) - LOG.info(msg) - try: - resp = requests.post(url, data, timeout=self.timeout) - except requests.exceptions.ConnectionError as err: - msg = ("Failed to establish connection to primary service at: " - "%r. ERROR: %r" % - (mgmt_ip, str(err).capitalize())) - LOG.error(msg) - return msg - except requests.exceptions.RequestException as err: - msg = ("Unexpected ERROR happened while adding " - "static IPs for primary service at: %r. " - "ERROR: %r" % - (mgmt_ip, str(err).capitalize())) - LOG.error(msg) - return msg - - try: - result = resp.json() - except ValueError as err: - msg = ("Unable to parse response, invalid JSON. URL: " - "%r. %r" % (url, str(err).capitalize())) - LOG.error(msg) - return msg - if not result['status']: - msg = ("Error adding static IPs. URL: %r. Reason: %s." % - (url, result['reason'])) - LOG.error(msg) - return msg - - msg = ("Static IPs successfully added.") - LOG.info(msg) - return common_const.STATUS_SUCCESS - - def configure_interfaces(self, context, resource_data): - """ Configure interfaces for the service VM. 
- - Calls static IP configuration function and implements - persistent rule addition in the service VM. - Issues REST call to service VM for configuration of interfaces. - - :param context: neutron context - :param resource_data: a dictionary of firewall rules and objects - send by neutron plugin - - Returns: SUCCESS/Failure message with reason. - - """ - - mgmt_ip = resource_data['mgmt_ip'] - - try: - result_log_forward = self._configure_log_forwarding( - const.request_url, mgmt_ip, self.port) - except Exception as err: - msg = ("Failed to configure log forwarding for service at %s. " - "Error: %s" % (mgmt_ip, err)) - LOG.error(msg) - return msg - else: - if result_log_forward == common_const.UNHANDLED: - pass - elif result_log_forward != common_const.STATUS_SUCCESS: - msg = ("Failed to configure log forwarding for service at %s. " - "Error: %s" % (mgmt_ip, err)) - LOG.error(msg) - return result_log_forward - else: - msg = ("Configured log forwarding for service at %s. " - "Result: %s" % (mgmt_ip, result_log_forward)) - LOG.info(msg) - - try: - result_static_ips = self._configure_static_ips(resource_data) - except Exception as err: - msg = ("Failed to add static IPs. Error: %s" % err) - LOG.error(msg) - return msg - else: - if result_static_ips != common_const.STATUS_SUCCESS: - return result_static_ips - else: - msg = ("Added static IPs. Result: %s" % result_static_ips) - LOG.info(msg) - - rule_info = dict( - provider_mac=resource_data['provider_mac'], - stitching_mac=resource_data['stitching_mac']) - - url = const.request_url % (mgmt_ip, - self.port, 'add_rule') - data = jsonutils.dumps(rule_info) - msg = ("Initiating POST request to add persistent rule to primary " - "service at: %r" % mgmt_ip) - LOG.info(msg) - try: - resp = requests.post(url, data, timeout=self.timeout) - except requests.exceptions.ConnectionError as err: - msg = ("Failed to establish connection to primary service at: " - "%r. 
ERROR: %r" % - (mgmt_ip, str(err).capitalize())) - LOG.error(msg) - return msg - except requests.exceptions.RequestException as err: - msg = ("Unexpected ERROR happened while adding " - "persistent rule of primary service at: %r. ERROR: %r" % - (mgmt_ip, str(err).capitalize())) - LOG.error(msg) - return msg - - try: - result = resp.json() - except ValueError as err: - msg = ("Unable to parse response, invalid JSON. URL: " - "%r. %r" % (url, str(err).capitalize())) - LOG.error(msg) - return msg - if not result['status']: - msg = ("Error adding persistent rule. URL: %r" % url) - LOG.error(msg) - return msg - - msg = ("Persistent rule successfully added.") - LOG.info(msg) - return common_const.STATUS_SUCCESS - - def _clear_static_ips(self, resource_data): - """ Clear static IPs for provider and stitching - interfaces of the service VM. - - Issues REST call to service VM for deletion of static IPs. - - :param resource_data: a dictionary of firewall rules and objects - send by neutron plugin - - Returns: SUCCESS/Failure message with reason. - - """ - - static_ips_info = dict( - provider_ip=resource_data.get('provider_ip'), - provider_cidr=resource_data.get('provider_cidr'), - provider_mac=resource_data.get('provider_mac'), - stitching_ip=resource_data.get('stitching_ip'), - stitching_cidr=resource_data.get('stitching_cidr'), - stitching_mac=resource_data.get('stitching_mac')) - mgmt_ip = resource_data['mgmt_ip'] - - url = const.request_url % (mgmt_ip, - self.port, - 'del_static_ip') - data = jsonutils.dumps(static_ips_info) - - msg = ("Initiating POST request to remove static IPs for primary " - "service at: %r" % mgmt_ip) - LOG.info(msg) - try: - resp = requests.delete(url, data=data, timeout=self.timeout) - except requests.exceptions.ConnectionError as err: - msg = ("Failed to establish connection to primary service at: " - "%r. 
ERROR: %r" % - (mgmt_ip, str(err).capitalize())) - LOG.error(msg) - return msg - except requests.exceptions.RequestException as err: - msg = ("Unexpected ERROR happened while removing " - "static IPs for primary service at: %r. ERROR: %r" % - (mgmt_ip, str(err).capitalize())) - LOG.error(msg) - return msg - - try: - result = resp.json() - except ValueError as err: - msg = ("Unable to parse response, invalid JSON. URL: " - "%r. %r" % (url, str(err).capitalize())) - LOG.error(msg) - return msg - if not result['status']: - msg = ("Error removing static IPs. URL: %r. Reason: %s." % - (url, result['reason'])) - LOG.error(msg) - return msg - - msg = ("Static IPs successfully removed.") - LOG.info(msg) - return common_const.STATUS_SUCCESS - - def clear_interfaces(self, context, resource_data): - """ Clear interfaces for the service VM. - - Calls static IP clear function and implements - persistent rule deletion in the service VM. - Issues REST call to service VM for deletion of interfaces. - - :param context: neutron context - :param resource_data: a dictionary of firewall rules and objects - send by neutron plugin - - Returns: SUCCESS/Failure message with reason. - - """ - - try: - result_static_ips = self._clear_static_ips(resource_data) - except Exception as err: - msg = ("Failed to remove static IPs. Error: %s" % err) - LOG.error(msg) - return msg - else: - if result_static_ips != common_const.STATUS_SUCCESS: - return result_static_ips - else: - msg = ("Successfully removed static IPs. 
" - "Result: %s" % result_static_ips) - LOG.info(msg) - - rule_info = dict( - provider_mac=resource_data['provider_mac'], - stitching_mac=resource_data['stitching_mac']) - - mgmt_ip = resource_data['mgmt_ip'] - - msg = ("Initiating DELETE persistent rule.") - LOG.info(msg) - url = const.request_url % (mgmt_ip, - self.port, - 'delete_rule') - - try: - data = jsonutils.dumps(rule_info) - resp = requests.delete(url, data=data, timeout=self.timeout) - except requests.exceptions.ConnectionError as err: - msg = ("Failed to establish connection to service at: %r. " - "ERROR: %r" % - (mgmt_ip, str(err).capitalize())) - LOG.error(msg) - raise Exception(err) - except requests.exceptions.RequestException as err: - msg = ("Unexpected ERROR happened while deleting " - "persistent rule of service at: %r. ERROR: %r" % - (mgmt_ip, str(err).capitalize())) - LOG.error(msg) - raise Exception(err) - - try: - result = resp.json() - except ValueError as err: - msg = ("Unable to parse response, invalid JSON. URL: " - "%r. %r" % (url, str(err).capitalize())) - LOG.error(msg) - raise Exception(msg) - if not result['status'] or resp.status_code not in [200, 201, 202]: - msg = ("Error deleting persistent rule. URL: %r" % url) - LOG.error(msg) - raise Exception(msg) - msg = ("Persistent rule successfully deleted.") - LOG.info(msg) - return common_const.STATUS_SUCCESS - - def configure_routes(self, context, resource_data): - """ Configure routes for the service VM. - - Issues REST call to service VM for configuration of routes. - - :param context: neutron context - :param resource_data: a dictionary of firewall rules and objects - send by neutron plugin - - Returns: SUCCESS/Failure message with reason. - - """ - - mgmt_ip = resource_data.get('mgmt_ip') - source_cidrs = resource_data.get('source_cidrs') - gateway_ip = resource_data.get('gateway_ip') - - # REVISIT(VK): This was all along bad way, don't know why at all it - # was done like this. 
- - url = const.request_url % (mgmt_ip, self.port, - 'add-source-route') - active_configured = False - route_info = [] - for source_cidr in source_cidrs: - route_info.append({'source_cidr': source_cidr, - 'gateway_ip': gateway_ip}) - data = jsonutils.dumps(route_info) - msg = ("Initiating POST request to configure route of " - "primary service at: %r" % mgmt_ip) - LOG.info(msg) - try: - resp = requests.post(url, data=data, timeout=self.timeout) - except requests.exceptions.ConnectionError as err: - msg = ("Failed to establish connection to service at: " - "%r. ERROR: %r" % (mgmt_ip, str(err).capitalize())) - LOG.error(msg) - return msg - except requests.exceptions.RequestException as err: - msg = ("Unexpected ERROR happened while configuring " - "route of service at: %r ERROR: %r" % - (mgmt_ip, str(err).capitalize())) - LOG.error(msg) - return msg - - if resp.status_code in common_const.SUCCESS_CODES: - message = jsonutils.loads(resp.text) - if message.get("status", False): - msg = ("Route configured successfully for VYOS" - " service at: %r" % mgmt_ip) - LOG.info(msg) - active_configured = True - else: - msg = ("Configure source route failed on service with" - " status %s %s" - % (resp.status_code, message.get("reason", None))) - LOG.error(msg) - return msg - - msg = ("Route configuration status : %r " - % (active_configured)) - LOG.info(msg) - if active_configured: - return common_const.STATUS_SUCCESS - else: - return ("Failed to configure source route. Response code: %s." - "Response Content: %r" % (resp.status_code, resp.content)) - - def clear_routes(self, context, resource_data): - """ Clear routes for the service VM. - - Issues REST call to service VM for deletion of routes. - - :param context: neutron context - :param resource_data: a dictionary of firewall rules and objects - send by neutron plugin - - Returns: SUCCESS/Failure message with reason. 
- - """ - - mgmt_ip = resource_data.get('mgmt_ip') - source_cidrs = resource_data.get('source_cidrs') - - # REVISIT(VK): This was all along bad way, don't know why at all it - # was done like this. - active_configured = False - url = const.request_url % (mgmt_ip, self.port, - 'delete-source-route') - route_info = [] - for source_cidr in source_cidrs: - route_info.append({'source_cidr': source_cidr}) - data = jsonutils.dumps(route_info) - msg = ("Initiating DELETE route request to primary service at: %r" - % mgmt_ip) - LOG.info(msg) - try: - resp = requests.delete(url, data=data, timeout=self.timeout) - except requests.exceptions.ConnectionError as err: - msg = ("Failed to establish connection to primary service at: " - " %r. ERROR: %r" % (mgmt_ip, err)) - LOG.error(msg) - return msg - except requests.exceptions.RequestException as err: - msg = ("Unexpected ERROR happened while deleting " - " route of service at: %r ERROR: %r" - % (mgmt_ip, err)) - LOG.error(msg) - return msg - - if resp.status_code in common_const.SUCCESS_CODES: - active_configured = True - - msg = ("Route deletion status : %r " - % (active_configured)) - LOG.info(msg) - if active_configured: - return common_const.STATUS_SUCCESS - else: - return ("Failed to delete source route. Response code: %s." - "Response Content: %r" % (resp.status_code, resp.content)) - -""" Firewall as a service driver for handling firewall -service configuration requests. - -We initialize service type in this class because agent loads -class object only for those driver classes that have service type -initialized. Also, only this driver class is exposed to the agent. 
- -""" - - -class FwaasDriver(FwGenericConfigDriver): - service_type = fw_const.SERVICE_TYPE - service_vendor = const.VYOS - - def __init__(self, conf): - self.conf = conf - self.timeout = const.REST_TIMEOUT - self.host = self.conf.host - self.port = const.CONFIGURATION_SERVER_PORT - super(FwaasDriver, self).__init__() - - def _get_firewall_attribute(self, firewall): - """ Retrieves management IP from the firewall resource received - - :param firewall: firewall dictionary containing rules - and other objects - - Returns: management IP - - """ - - description = ast.literal_eval(firewall["description"]) - if not description.get('vm_management_ip'): - msg = ("Failed to find vm_management_ip.") - LOG.debug(msg) - raise - - if not description.get('service_vendor'): - msg = ("Failed to find service_vendor.") - LOG.debug(msg) - raise - - msg = ("Found vm_management_ip %s." - % description['vm_management_ip']) - LOG.debug(msg) - return description['vm_management_ip'] - - def _print_exception(self, exception_type, err, - url, operation, response=None): - """ Abstract class for printing log messages - - :param exception_type: Name of the exception as a string - :param err: Either error of type Exception or error code - :param url: Service url - :param operation: Create, update or delete - :param response: Response content from Service VM - - """ - - if exception_type == 'ConnectionError': - msg = ("Error occurred while connecting to firewall " - "service at URL: %r. Firewall not %sd. %s. " - % (url, operation, str(err).capitalize())) - LOG.error(msg) - elif exception_type == 'RequestException': - msg = ("Unexpected error occurred while connecting to " - "firewall service at URL: %r. Firewall not %sd. %s" - % (url, operation, str(err).capitalize())) - LOG.error(msg) - elif exception_type == 'ValueError': - msg = ("Unable to parse the response. Invalid " - "JSON from URL: %r. Firewall not %sd. %s. 
%r" - % (url, operation, str(err).capitalize(), response)) - LOG.error(msg) - elif exception_type == 'UnexpectedError': - msg = ("Unexpected error occurred while connecting to service " - "at URL: %r. Firewall not %sd. %s. %r" - % (url, operation, str(err).capitalize(), response)) - LOG.error(msg) - elif exception_type == 'Failure': - msg = ("Firewall not %sd. URL: %r. Response " - "code from server: %r. %r" - % (operation, url, err, response)) - LOG.error(msg) - - def create_firewall(self, context, firewall, host): - """ Implements firewall creation - - Issues REST call to service VM for firewall creation - - :param context: Neutron context - :param firewall: Firewall resource object from neutron fwaas plugin - :param host: Name of the host machine - - Returns: SUCCESS/Failure message with reason. - - """ - - msg = ("Processing create firewall request in FWaaS Driver " - "for Firewall ID: %s." % firewall['id']) - LOG.debug(msg) - mgmt_ip = self._get_firewall_attribute(firewall) - url = const.request_url % (mgmt_ip, - self.port, - 'configure-firewall-rule') - msg = ("Initiating POST request for FIREWALL ID: %r Tenant ID:" - " %r. URL: %s" % (firewall['id'], firewall['tenant_id'], url)) - LOG.info(msg) - data = jsonutils.dumps(firewall) - try: - resp = requests.post(url, data, timeout=self.timeout) - except requests.exceptions.ConnectionError as err: - self._print_exception('ConnectionError', err, url, 'create') - raise requests.exceptions.ConnectionError(err) - except requests.exceptions.RequestException as err: - self._print_exception('RequestException', err, url, 'create') - raise requests.exceptions.RequestException(err) - - msg = ("POSTed the configuration to Service VM") - LOG.debug(msg) - if resp.status_code in common_const.SUCCESS_CODES: - try: - resp_payload = resp.json() - if resp_payload['config_success']: - msg = ("Configured Firewall successfully. 
URL: %s" - % url) - LOG.info(msg) - return common_const.STATUS_ACTIVE - else: - self._print_exception('Failure', - resp.status_code, url, - 'create', resp.content) - return common_const.STATUS_ERROR - except ValueError as err: - self._print_exception('ValueError', err, url, - 'create', resp.content) - return common_const.STATUS_ERROR - except Exception as err: - self._print_exception('UnexpectedError', err, url, - 'create', resp.content) - return common_const.STATUS_ERROR - else: - self._print_exception('Failure', resp.status_code, url, - 'create', resp.content) - return common_const.STATUS_ERROR - - def update_firewall(self, context, firewall, host): - """ Implements firewall updation - - Issues REST call to service VM for firewall updation - - :param context: Neutron context - :param firewall: Firewall resource object from neutron fwaas plugin - :param host: Name of the host machine - - Returns: SUCCESS/Failure message with reason. - - """ - - mgmt_ip = self._get_firewall_attribute(firewall) - url = const.request_url % (mgmt_ip, - self.port, - 'update-firewall-rule') - msg = ("Initiating UPDATE request. URL: %s" % url) - LOG.info(msg) - data = jsonutils.dumps(firewall) - try: - resp = requests.put(url, data=data, timeout=self.timeout) - except Exception as err: - self._print_exception('UnexpectedError', err, url, 'update') - raise Exception(err) - if resp.status_code == 200: - msg = ("Successful UPDATE request. URL: %s" % url) - LOG.info(msg) - return common_const.STATUS_ACTIVE - else: - self._print_exception('Failure', resp.status_code, url, - 'create', resp.content) - return common_const.STATUS_ERROR - - def delete_firewall(self, context, firewall, host): - """ Implements firewall deletion - - Issues REST call to service VM for firewall deletion - - :param context: Neutron context - :param firewall: Firewall resource object from neutron fwaas plugin - :param host: Name of the host machine - - Returns: SUCCESS/Failure message with reason. 
- - """ - - mgmt_ip = self._get_firewall_attribute(firewall) - url = const.request_url % (mgmt_ip, - self.port, - 'delete-firewall-rule') - msg = ("Initiating DELETE request. URL: %s" % url) - LOG.info(msg) - data = jsonutils.dumps(firewall) - try: - resp = requests.delete(url, data=data, timeout=self.timeout) - except requests.exceptions.ConnectionError as err: - self._print_exception('ConnectionError', err, url, 'delete') - raise requests.exceptions.ConnectionError(err) - except requests.exceptions.RequestException as err: - self._print_exception('RequestException', err, url, 'delete') - raise requests.exceptions.RequestException(err) - - if resp.status_code in common_const.SUCCESS_CODES: - # For now agent only check for ERROR. - try: - resp_payload = resp.json() - if resp_payload['delete_success']: - msg = ("Deleted Firewall successfully.") - LOG.info(msg) - return common_const.STATUS_DELETED - elif not resp_payload['delete_success'] and \ - resp_payload.get('message', '') == ( - const.INTERFACE_NOT_FOUND): - # VK: This is a special case. - msg = ("Firewall not deleted, as interface is not " - "available in firewall. Possibly got detached. " - " So marking this delete as success. 
URL: %r" - "Response Content: %r" % (url, resp.content)) - LOG.error(msg) - return common_const.STATUS_SUCCESS - else: - self._print_exception('Failure', - resp.status_code, url, - 'delete', resp.content) - return common_const.STATUS_ERROR - except ValueError as err: - self._print_exception('ValueError', err, url, - 'delete', resp.content) - return common_const.STATUS_ERROR - except Exception as err: - self._print_exception('UnexpectedError', err, url, - 'delete', resp.content) - return common_const.STATUS_ERROR - else: - self._print_exception('Failure', resp.status_code, url, - 'create', resp.content) - return common_const.STATUS_ERROR diff --git a/gbpservice/nfp/configurator/lib/vpn_constants.py b/gbpservice/nfp/configurator/lib/vpn_constants.py deleted file mode 100644 index f248469cb2..0000000000 --- a/gbpservice/nfp/configurator/lib/vpn_constants.py +++ /dev/null @@ -1,44 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -DRIVERS_DIR = 'gbpservice.nfp.configurator.drivers.vpn' - -SERVICE_TYPE = 'vpn' -SERVICE_VENDOR = 'vyos' - -STATE_PENDING = 'PENDING_CREATE' -STATE_INIT = 'INIT' -STATE_ACTIVE = 'ACTIVE' -STATE_ERROR = 'ERROR' -NEUTRON = 'NEUTRON' - -STATUS_ACTIVE = "ACTIVE" -STATUS_DELETED = "DELETED" -STATUS_UPDATED = "UPDATED" -STATUS_ERROR = "ERROR" -STATUS_SUCCESS = "SUCCESS" - -CONFIGURATION_SERVER_PORT = 8888 -REST_TIMEOUT = 90 -request_url = "http://%s:%s/%s" -SUCCESS_CODES = [200, 201, 202, 203, 204] -ERROR_CODES = [400, 404, 500] - -VYOS = 'vyos' -SM_RPC_TOPIC = 'VPN-sm-topic' -VPN_RPC_TOPIC = "vpn_topic" -VPN_GENERIC_CONFIG_RPC_TOPIC = "vyos_vpn_topic" - -VPN_PLUGIN_TOPIC = 'vpn_plugin' -VPN_AGENT_TOPIC = 'vpn_agent' - -CONFIGURATION_SERVER_PORT = '8888' diff --git a/gbpservice/nfp/configurator/run.sh b/gbpservice/nfp/configurator/run.sh deleted file mode 100644 index 474eff82cb..0000000000 --- a/gbpservice/nfp/configurator/run.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash - -service rabbitmq-server start -screen -dmS "configurator" /usr/bin/python2 /usr/bin/nfp --config-file=/etc/nfp_configurator.ini --config-dir=/etc/nfp_config --log-file=/var/log/nfp/nfp_configurator.log -cd /usr/local/lib/python2.7/dist-packages/gbpservice/nfp/configurator/api/ -python setup.py develop -screen -dmS "pecan" pecan serve config.py -/bin/bash - diff --git a/gbpservice/nfp/core/controller.py b/gbpservice/nfp/core/controller.py index b891762d3e..c951d4d9bf 100644 --- a/gbpservice/nfp/core/controller.py +++ b/gbpservice/nfp/core/controller.py @@ -72,11 +72,13 @@ def _make_new_event(self, event): def get_event_handlers(self): return self._event_handlers - def register_events(self, event_descs): + def register_events(self, event_descs, module='', priority=0): """Register event handlers with core. """ # REVISIT (mak): change name to register_event_handlers() ? 
for event_desc in event_descs: - self._event_handlers.register(event_desc.id, event_desc.handler) + self._event_handlers.register( + event_desc.id, event_desc.handler, + module=module, priority=priority) def register_rpc_agents(self, agents): """Register rpc handlers with core. """ @@ -112,17 +114,18 @@ def post_event_graph(self, event): event.desc.pid = os.getpid() return event - def post_event(self, event): + def post_event(self, event, target=None): """Post an event. As a base class, it only does the descriptor preparation. NfpController class implements the required functionality. """ - handler = self._event_handlers.get_event_handler(event.id) + handler = self._event_handlers.get_event_handler(event.id, module=target) assert handler, "No handler registered for event %s" % (event.id) event.desc.type = nfp_event.SCHEDULE_EVENT event.desc.flag = nfp_event.EVENT_NEW event.desc.pid = os.getpid() + event.desc.target = target return event # REVISIT (mak): spacing=0, caller must explicitly specify @@ -374,7 +377,7 @@ def post_event_graph(self, event, graph_nodes): LOG.debug(message) self._manager.process_events([event]) - def post_event(self, event): + def post_event(self, event, target=None): """Post a new event into the system. 
If distributor(main) process posts an event, it @@ -387,7 +390,7 @@ def post_event(self, event): Returns: None """ - event = super(NfpController, self).post_event(event) + event = super(NfpController, self).post_event(event, target=target) message = "(event - %s) - New event" % (event.identify()) LOG.debug(message) if self.PROCESS_TYPE == "worker": diff --git a/gbpservice/nfp/core/event.py b/gbpservice/nfp/core/event.py index 6df8ae084c..d19bb7d204 100644 --- a/gbpservice/nfp/core/event.py +++ b/gbpservice/nfp/core/event.py @@ -186,6 +186,8 @@ def __init__(self, **kwargs): self.worker = kwargs.get('worker') # Polling descriptor of event self.poll_desc = kwargs.get('poll_desc') + # Target module to which this event must be delivered + self.target = None def from_desc(self, desc): self.type = desc.type @@ -283,7 +285,7 @@ def _log_meta(self, event_id, event_handler=None): else: return "(event_id - %s) - (event_handler - None)" % (event_id) - def register(self, event_id, event_handler): + def register(self, event_id, event_handler, module='', priority=0): """Registers a handler for event_id. 
Also fetches the decorated poll handlers if any @@ -304,20 +306,38 @@ def register(self, event_id, event_handler): spacing = 0 try: - self._event_desc_table[event_id].append( - (event_handler, poll_handler, spacing)) + try: + self._event_desc_table[event_id]['modules'][module].append( + (event_handler, poll_handler, spacing)) + except KeyError: + self._event_desc_table[event_id]['modules'][module] = [ + (event_handler, poll_handler, spacing)] + try: + self._event_desc_table[event_id]['priority'][priority].append( + (event_handler, poll_handler, spacing)) + except KeyError: + self._event_desc_table[event_id]['priority'][priority] = [ + (event_handler, poll_handler, spacing)] except KeyError: - self._event_desc_table[event_id] = [ + self._event_desc_table[event_id] = {'modules':{}, 'priority':{}} + self._event_desc_table[event_id]['modules'][module] = [ + (event_handler, poll_handler, spacing)] + self._event_desc_table[event_id]['priority'][priority] = [ (event_handler, poll_handler, spacing)] message = "%s - Registered handler" % ( self._log_meta(event_id, event_handler)) LOG.debug(message) - def get_event_handler(self, event_id): + def get_event_handler(self, event_id, module=None): """Get the handler for the event_id. """ eh = None try: - eh = self._event_desc_table[event_id][0][0] + if module: + eh = self._event_desc_table[event_id]['modules'][module][0][0] + else: + priorities = self._event_desc_table[event_id]['priority'].keys() + priority = max(priorities) + eh = self._event_desc_table[event_id]['priority'][priority][0][0] finally: message = "%s - Returning event handler" % ( self._log_meta(event_id, eh)) @@ -328,7 +348,9 @@ def get_poll_handler(self, event_id): """Get the poll handler for event_id. 
""" ph = None try: - ph = self._event_desc_table[event_id][0][1] + priorities = self._event_desc_table[event_id]['priority'].keys() + priority = max(priorities) + ph = self._event_desc_table[event_id]['priority'][priority][0][1] finally: message = "%s - Returning poll handler" % ( self._log_meta(event_id, ph)) diff --git a/gbpservice/nfp/core/worker.py b/gbpservice/nfp/core/worker.py index e2da1efca2..b9e7e4d993 100644 --- a/gbpservice/nfp/core/worker.py +++ b/gbpservice/nfp/core/worker.py @@ -102,7 +102,7 @@ def _process_event(self, event): """ if event.desc.type == nfp_event.SCHEDULE_EVENT: self._send_event_ack(event) - eh = self.event_handlers.get_event_handler(event.id) + eh = self.event_handlers.get_event_handler(event.id, module=event.desc.target) self.dispatch(eh.handle_event, event) elif event.desc.type == nfp_event.POLL_EVENT: self.dispatch(self._handle_poll_event, event) diff --git a/gbpservice/nfp/orchestrator/config_drivers/heat_driver.py b/gbpservice/nfp/orchestrator/config_drivers/heat_driver.py index c8df970260..09dd2000bf 100644 --- a/gbpservice/nfp/orchestrator/config_drivers/heat_driver.py +++ b/gbpservice/nfp/orchestrator/config_drivers/heat_driver.py @@ -246,6 +246,19 @@ def loadbalancer_post_stack_create(self, network_function_details): if port_info['port_model'] != nfp_constants.GBP_PORT: return + def _post_stack_create(self, nfp_context): + service_details = self.get_service_details_from_nfp_context( + nfp_context) + service_type = service_details['service_details']['service_type'] + + if service_type in [pconst.LOADBALANCER]: + logging_context = nfp_logging.get_logging_context() + auth_token = logging_context['auth_token'] + provider_tenant_id = nfp_context['tenant_id'] + provider = service_details['provider_ptg'] + self._create_policy_target_for_vip( + auth_token, provider_tenant_id, provider) + def _create_policy_target_for_vip(self, auth_token, provider_tenant_id, provider): provider_subnet = None @@ -820,6 +833,97 @@ def 
_create_node_config_data(self, auth_token, tenant_id, 'description'] = str(common_desc) nf_desc = str(firewall_desc) + elif service_type == pconst.VPN: + config_param_values['Subnet'] = ( + consumer_port['fixed_ips'][0]['subnet_id'] + if consumer_port else None) + l2p = self.gbp_client.get_l2_policy( + auth_token, provider['l2_policy_id']) + l3p = self.gbp_client.get_l3_policy( + auth_token, l2p['l3_policy_id']) + config_param_values['RouterId'] = l3p['routers'][0] + stitching_cidr = service_details['consumer_subnet']['cidr'] + mgmt_gw_ip = self._get_management_gw_ip(auth_token) + if not mgmt_gw_ip: + return None, None + + services_nsp = self.gbp_client.get_network_service_policies( + auth_token, + filters={'name': ['nfp_services_nsp']}) + if not services_nsp: + fip_nsp = { + 'network_service_policy': { + 'name': 'nfp_services_nsp', + 'description': 'nfp_implicit_resource', + 'shared': False, + 'tenant_id': tenant_id, + 'network_service_params': [ + {"type": "ip_pool", "value": "nat_pool", + "name": "vpn_svc_external_access"}] + } + } + nsp = self.gbp_client.create_network_service_policy( + auth_token, fip_nsp) + else: + nsp = services_nsp[0] + if not base_mode_support: + stitching_pts = self.gbp_client.get_policy_targets( + auth_token, + filters={'port_id': [consumer_port['id']]}) + if not stitching_pts: + LOG.error(_LE("Policy target is not created for the " + "stitching port")) + return None, None + stitching_ptg_id = ( + stitching_pts[0]['policy_target_group_id']) + else: + stitching_ptg_id = consumer['id'] + self.gbp_client.update_policy_target_group( + auth_token, stitching_ptg_id, + {'policy_target_group': { + 'network_service_policy_id': nsp['id']}}) + if not base_mode_support: + filters = {'port_id': consumer_port['id']} + stitching_port_fip = self.neutron_client.get_floating_ips( + auth_token, + filters)[0]['floating_ip_address'] + if not stitching_port_fip: + LOG.error(_LE("Floating IP for VPN Service has been " + "disassociated Manually")) + return 
None, None + try: + desc = ('fip=' + mgmt_ip + + ";tunnel_local_cidr=" + + provider_cidr + ";user_access_ip=" + + stitching_port_fip + ";fixed_ip=" + + consumer_port['fixed_ips'][0]['ip_address'] + + ';service_vendor=' + service_vendor + + ';stitching_cidr=' + stitching_cidr + + ';stitching_gateway=' + service_details[ + 'consumer_subnet']['gateway_ip'] + + ';mgmt_gw_ip=' + mgmt_gw_ip + + ';network_function_id=' + network_function['id']) + except Exception: + LOG.error(_LE("Problem in preparing description, some of " + "the fields might not have initialized")) + return None, None + stack_params['ServiceDescription'] = desc + siteconn_keys = self._get_site_conn_keys( + stack_template[resources_key], + is_template_aws_version, + 'OS::Neutron::IPsecSiteConnection') + for siteconn_key in siteconn_keys: + stack_template[resources_key][siteconn_key][ + properties_key]['description'] = str(common_desc) + + vpnservice_key = self._get_heat_resource_key( + stack_template[resources_key], + is_template_aws_version, + 'OS::Neutron::VPNService') + stack_template[resources_key][vpnservice_key][properties_key][ + 'description'] = str(common_desc) + + nf_desc = str(desc) if nf_desc: network_function['description'] = network_function[ @@ -1330,6 +1434,7 @@ def check_config_complete(self, nfp_context): if stack.stack_status == 'DELETE_FAILED': return failure_status elif stack.stack_status == 'CREATE_COMPLETE': + self._post_stack_create(nfp_context) return success_status elif stack.stack_status == 'UPDATE_COMPLETE': return success_status diff --git a/gbpservice/nfp/orchestrator/db/enterprise_migration/nfp_enterprise_db.py b/gbpservice/nfp/orchestrator/db/enterprise_migration/nfp_enterprise_db.py new file mode 100644 index 0000000000..720740c516 --- /dev/null +++ b/gbpservice/nfp/orchestrator/db/enterprise_migration/nfp_enterprise_db.py @@ -0,0 +1,54 @@ +# Copyright 2016 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this 
file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""nfp_enterprise_db + +Revision ID: +Revises: +Create Date: 2016-07-25 07:28:39.063889 + +""" + +# revision identifiers, used by Alembic. +revision = '' +down_revision = '' + +from alembic import op +import sqlalchemy as sa + + +def upgrade(): + op.create_table( + 'nfp_network_function_device_interfaces', + sa.Column('tenant_id', sa.String(length=255), nullable=True), + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('plugged_in_port_id', sa.String(length=36), nullable=True), + sa.Column('interface_position', + sa.Integer(), + nullable=True), + sa.Column('mapped_real_port_id', sa.String(length=36), nullable=True), + sa.Column('network_function_device_id', sa.String(length=36), nullable=True), + sa.ForeignKeyConstraint(['plugged_in_port_id'], + ['nfp_port_infos.id'], + ondelete='SET NULL'), + sa.ForeignKeyConstraint(['network_function_device_id'], + ['nfp_network_function_devices.id'], + ondelete='SET NULL'), + sa.PrimaryKeyConstraint('id') + ) + + +def downgrade(): + pass diff --git a/gbpservice/nfp/orchestrator/drivers/orchestration_driver.py b/gbpservice/nfp/orchestrator/drivers/orchestration_driver.py index 171b88ac41..27f802013c 100644 --- a/gbpservice/nfp/orchestrator/drivers/orchestration_driver.py +++ b/gbpservice/nfp/orchestrator/drivers/orchestration_driver.py @@ -56,10 +56,9 @@ class OrchestrationDriver(object): is launched for each Network Service Instance """ - def __init__(self, config, supports_device_sharing=True, + def __init__(self, config, supports_hotplug=True, 
max_interfaces=10): self.service_vendor = 'general' - self.supports_device_sharing = supports_device_sharing self.supports_hotplug = supports_hotplug self.maximum_interfaces = max_interfaces self.identity_handler = openstack_driver.KeystoneClient(config) @@ -143,9 +142,6 @@ def _decrement_stats_counter(self, metric, by=1): " '%(metric)s' by %(by)d") % {'metric': metric, 'by': by}) - def _is_device_sharing_supported(self): - return self.supports_device_sharing - def _create_management_interface(self, token, admin_tenant_id, device_data, network_handler): @@ -297,8 +293,6 @@ def _get_vendor_data_fast(self, token, {'image_name': image_name, 'error': e}) return None vendor_data = self._verify_vendor_data(image_name, metadata) - if not vendor_data: - return None return vendor_data def _update_self_with_vendor_data(self, vendor_data, attr): @@ -312,6 +306,7 @@ def _update_self_with_vendor_data(self, vendor_data, attr): {'attr': attr, 'default': attr_value}) def _update_vendor_data(self, device_data, token=None): + vendor_data = {} try: image_name = self._get_image_name(device_data) vendor_data = self._get_vendor_data(device_data, image_name) @@ -334,6 +329,7 @@ def _update_vendor_data(self, device_data, token=None): LOG.error(_LE("Error while getting metadata for image name:" "%(image_name)s, proceeding with default values"), {'image_name': image_name}) + return vendor_data def _update_vendor_data_fast(self, token, admin_tenant_id, image_name, device_data): @@ -393,120 +389,6 @@ def _get_device_service_types_map(self, token, devices, network_handler): device_service_types_map[device['id']].add(service_type) return device_service_types_map - def get_network_function_device_sharing_info(self, device_data): - """ Get filters for NFD sharing - - :param device_data: NFD data - :type device_data: dict - - :returns: None -- when device sharing is not supported - :returns: dict -- It has the following scheme - { - 'filters': { - 'key': 'value', - ... 
- } - } - - :raises: exceptions.IncompleteData - """ - - if ( - any(key not in device_data - for key in ['tenant_id', - 'service_details']) or - - type(device_data['service_details']) is not dict or - - any(key not in device_data['service_details'] - for key in ['service_vendor']) - ): - raise exceptions.IncompleteData() - - if not self._is_device_sharing_supported(): - return None - - return { - 'filters': { - 'tenant_id': [device_data['tenant_id']], - 'service_vendor': [device_data['service_details'][ - 'service_vendor']], - 'status': [nfp_constants.ACTIVE] - } - } - - @_set_network_handler - def select_network_function_device(self, devices, device_data, - network_handler=None): - """ Select a NFD which is eligible for sharing - - :param devices: NFDs - :type devices: list - :param device_data: NFD data - :type device_data: dict - - :returns: None -- when device sharing is not supported, or - when no device is eligible for sharing - :return: dict -- NFD which is eligible for sharing - - :raises: exceptions.IncompleteData - """ - - if ( - any(key not in device_data - for key in ['ports']) or - - type(device_data['ports']) is not list or - - any(key not in port - for port in device_data['ports'] - for key in ['id', - 'port_classification', - 'port_model']) or - - type(devices) is not list or - - any(key not in device - for device in devices - for key in ['interfaces_in_use']) - ): - raise exceptions.IncompleteData() - - token = self._get_token(device_data.get('token')) - if not token: - return None - image_name = self._get_image_name(device_data) - if image_name: - self._update_vendor_data(device_data, - device_data.get('token')) - if not self._is_device_sharing_supported(): - return None - - hotplug_ports_count = 1 # for provider interface (default) - if any(port['port_classification'] == nfp_constants.CONSUMER - for port in device_data['ports']): - hotplug_ports_count = 2 - - device_service_types_map = ( - self._get_device_service_types_map(token, devices, - 
network_handler)) - service_type = device_data['service_details']['service_type'] - for device in devices: - if ( - (device['interfaces_in_use'] + hotplug_ports_count) <= - self.maximum_interfaces - ): - if (service_type.lower() == nfp_constants.VPN.lower() and - service_type in device_service_types_map[ - device['id']]): - # Restrict multiple VPN services to share same device - # If nfd request service type is VPN and current filtered - # device already has VPN service instantiated, ignore this - # device and checks for next one - continue - return device - return None - def get_image_id(self, nova, token, admin_tenant_id, image_name): try: image_id = nova.get_image_id(token, admin_tenant_id, image_name) @@ -1164,32 +1046,24 @@ def unplug_network_function_device_interfaces(self, device_data, compute_policy=device_data['service_details']['device_type']) image_name = self._get_image_name(device_data) + vendor_data = {} if image_name: - self._update_vendor_data(device_data, - device_data.get('token')) + vendor_data = self._update_vendor_data(device_data, + device_data.get('token')) token = self._get_token(device_data.get('token')) if not token: return None - - executor = nfp_executor.TaskExecutor(jobs=1) - vendor_data_result = {} - tenant_id = device_data.get('tenant_id') - - executor.add_job('UPDATE_VENDOR_DATA', - self._update_vendor_data_fast, - token, tenant_id, image_name, device_data, - result_store=vendor_data_result) - executor.fire() - - vendor_data = vendor_data_result.get('result', None) if not vendor_data: LOG.warn(_LE('Failed to get vendor data for device deletion.')) - vendor_data = {} update_ifaces = [] - try: + supports_hotplug = True + if vendor_data: if vendor_data.get('supports_hotplug') == False: + supports_hotplug = False + try: + if supports_hotplug == False: if self.setup_mode.get(nfp_constants.APIC_MODE): data_port_ids = [] for port in device_data['ports']: diff --git a/gbpservice/nfp/orchestrator/drivers/sharing_driver.py 
b/gbpservice/nfp/orchestrator/drivers/sharing_driver.py new file mode 100644 index 0000000000..2fb561710b --- /dev/null +++ b/gbpservice/nfp/orchestrator/drivers/sharing_driver.py @@ -0,0 +1,90 @@ +from gbpservice.nfp.common import constants as nfp_constants +from gbpservice.nfp.common import exceptions +from gbpservice.nfp.orchestrator.drivers import orchestration_driver + + +class SharingDriver(orchestration_driver.OrchestrationDriver): + + @orchestration_driver._set_network_handler + def select_network_function_device(self, devices, + device_data, network_handler=None): + """ Select a NFD which is eligible for sharing + + :param devices: NFDs + :type devices: list + :param device_data: NFD data + :type device_data: dict + + :returns: None -- when device sharing is not supported, or + when no device is eligible for sharing + :return: dict -- NFD which is eligible for sharing + + :raises: exceptions.IncompleteData + """ + + if ( + any(key not in device_data + for key in ['ports']) or + + type(device_data['ports']) is not list or + + any(key not in port + for port in device_data['ports'] + for key in ['id', + 'port_classification', + 'port_model']) or + + type(devices) is not list or + + any(key not in device + for device in devices + for key in ['interfaces_in_use']) + ): + raise exceptions.IncompleteData() + + token = device_data['token'] + hotplug_ports_count = 1 # for provider interface (default) + if any(port['port_classification'] == nfp_constants.CONSUMER + for port in device_data['ports']): + hotplug_ports_count = 2 + + device_service_types_map = ( + self._get_device_service_types_map(token, devices, + network_handler)) + service_type = device_data['service_details']['service_type'] + for device in devices: + if ( + (device['interfaces_in_use'] + hotplug_ports_count) <= + self.maximum_interfaces + ): + if (service_type.lower() == nfp_constants.VPN.lower() and + service_type in device_service_types_map[ + device['id']]): + # Restrict multiple VPN services to 
share same device + # If nfd request service type is VPN and current filtered + # device already has VPN service instantiated, ignore this + # device and checks for next one + continue + admin_tenant_id = device_data['admin_tenant_id'] + image_name = super(SharingDriver, self)._get_image_name( + device_data) + vendor_data = super(SharingDriver, self)._get_vendor_data_fast( + token, + admin_tenant_id, + image_name, + device_data) + device['vendor_data'] = vendor_data + device['interfaces_in_use'] += hotplug_ports_count + return device + return None + + @orchestration_driver._set_network_handler + def get_managment_info(self, device_data, network_handler=None): + port_id = network_handler.get_port_id(device_data['token'], + device_data['mgmt_port_id'][ + 'id']) + managemt_info = super( + SharingDriver, self).get_neutron_port_details(network_handler, + device_data['token'], + port_id) + return managemt_info diff --git a/gbpservice/nfp/orchestrator/modules/device_orchestrator.py b/gbpservice/nfp/orchestrator/modules/device_orchestrator.py index ace9849fae..2624252f5e 100644 --- a/gbpservice/nfp/orchestrator/modules/device_orchestrator.py +++ b/gbpservice/nfp/orchestrator/modules/device_orchestrator.py @@ -65,7 +65,8 @@ def events_init(controller, config, device_orchestrator): for event in events: events_to_register.append( Event(id=event, handler=device_orchestrator)) - controller.register_events(events_to_register) + controller.register_events(events_to_register, + module='device_orchestrator') def nfp_module_init(controller, config): @@ -495,18 +496,6 @@ def _decrement_device_interface_count(self, device): def _get_orchestration_driver(self, service_vendor): return self.orchestration_driver - def _get_device_to_reuse(self, device_data, dev_sharing_info): - device_filters = dev_sharing_info['filters'] - orchestration_driver = self._get_orchestration_driver( - device_data['service_details']['service_vendor']) - - devices = 
self._get_network_function_devices(device_filters) - - device = orchestration_driver.select_network_function_device( - devices, - device_data) - return device - def _get_device_data(self, nfd_request): device_data = {} @@ -629,26 +618,6 @@ def create_network_function_device(self, event): device_data = self._prepare_device_data_from_nfp_context(nfp_context) - # dev_sharing_info = ( - # orchestration_driver.get_network_function_device_sharing_info( - # device_data)) - - # if dev_sharing_info: - # device = self._get_device_to_reuse(device_data, dev_sharing_info) - # if device: - # device = self._update_device_data(device, device_data) - - # # To handle case, when device sharing is supported but device not - # # exists to share, so create a new device. - # if dev_sharing_info and device: - # # Device is already active, no need to change status - # device['network_function_device_id'] = device['id'] - # self._create_event(event_id='DEVICE_HEALTHY', - # event_data=device, - # is_internal_event=True) - # LOG.info(_LI("Sharing existing device: %s(device)s for reuse"), - # {'device': device}) - # REVISIT(TODO): Removing sharing for cisco live demo if 0: pass else: @@ -693,7 +662,7 @@ def create_network_function_device(self, event): nfp_context['network_function_instance']['id']), 'network_function_device_id': device['id'] } - + nfp_context['event_desc'] = event.desc.to_dict() self._create_event(event_id='DEVICE_SPAWNING', event_data=nfp_context, is_poll_event=True, @@ -834,6 +803,22 @@ def device_up(self, event): for result in results: if result.result.lower() != 'success': return self._controller.event_complete(event, result='FAILED') + network_function_device = nfp_context['network_function_device'] + self._update_network_function_device_db( + network_function_device, nfp_constants.ACTIVE) + LOG.info(_LI( + "Device Configuration completed for device: %(device_id)s" + "Updated DB status to ACTIVE, Incremented device " + "reference count for %(device)s"), + {'device_id': 
network_function_device['id'], + 'device': network_function_device}) + + nfd_event = self._controller.new_event( + id='CREATE_NETWORK_FUNCTION_DEVICE', + key=nfp_context['network_function']['id'], + binding_key=nfp_context['service_chain_node']['id'], + desc_dict=nfp_context.pop('event_desc')) + self._controller.event_complete(nfd_event) self._post_configure_device_graph(nfp_context) self._controller.event_complete(event) @@ -1116,13 +1101,6 @@ def device_configuration_complete(self, event, result='SUCCESS'): if result.lower() == 'success': self._increment_device_ref_count(device) - self._update_network_function_device_db( - device, nfp_constants.ACTIVE) - LOG.info(_LI( - "Device Configuration completed for device: %(device_id)s" - "Updated DB status to ACTIVE, Incremented device " - "reference count for %(device)s"), - {'device_id': device['id'], 'device': device}) # Invoke event_complete for original event which is # CREATE_DEVICE_CONFIGURATION @@ -1267,7 +1245,7 @@ def _prepare_failure_case_device_data(self, nfp_context): def handle_plug_interface_failed(self, event): nfp_context = event.data device = self._prepare_failure_case_device_data(nfp_context) - status = nfp_constants.ERROR + status = nfp_constants.ACTIVE desc = "Failed to plug interfaces" self._update_network_function_device_db(device, status, desc) self._create_event(event_id='DEVICE_CREATE_FAILED', diff --git a/gbpservice/nfp/orchestrator/modules/service_orchestrator.py b/gbpservice/nfp/orchestrator/modules/service_orchestrator.py index abd179fc60..c19efc1519 100644 --- a/gbpservice/nfp/orchestrator/modules/service_orchestrator.py +++ b/gbpservice/nfp/orchestrator/modules/service_orchestrator.py @@ -80,7 +80,7 @@ def events_init(controller, config, service_orchestrator): for event in events: events_to_register.append( Event(id=event, handler=service_orchestrator)) - controller.register_events(events_to_register) + controller.register_events(events_to_register, module='service_orchestrator') def 
nfp_module_init(controller, config): @@ -854,23 +854,6 @@ def delete_user_config(self, event): event_data=request_data, is_poll_event=True, original_event=event) - def _get_network_function_instance_for_multi_service_sharing(self, - port_info): - network_function_instances = ( - self.db_handler.get_network_function_instances(self.db_session, - filters={})) - provider_port_id = None - for port in port_info: - if port['port_classification'] == 'provider': - provider_port_id = port['id'] - break - for network_function_instance in network_function_instances: - if (provider_port_id in network_function_instance['port_info'] and - network_function_instance['network_function_device_id'] - is not None): - return network_function_instance - return None - def create_network_function_instance(self, event): nfp_context = event.data @@ -893,11 +876,7 @@ def create_network_function_instance(self, event): # REVISIT(ashu): Only pick few chars from id name = '%s_%s' % (network_function['name'], network_function['id']) - network_function_instance = ( - self._get_network_function_instance_for_multi_service_sharing( - port_info)) - if network_function_instance: - port_info = [] + create_nfi_request = { 'name': name, 'tenant_id': network_function['tenant_id'], @@ -910,24 +889,6 @@ def create_network_function_instance(self, event): } nfi_db = self.db_handler.create_network_function_instance( self.db_session, create_nfi_request) - if network_function_instance: - port_info = [] - for port_id in network_function_instance['port_info']: - port_info.append(self.db_handler.get_port_info(self.db_session, - port_id)) - nfi = { - 'port_info': port_info - } - nfi_db = self.db_handler.update_network_function_instance( - self.db_session, nfi_db['id'], nfi) - nfd_data = {} - nfd_data['network_function_instance_id'] = nfi_db['id'] - nfd_data['network_function_device_id'] = ( - network_function_instance['network_function_device_id']) - self._create_event('DEVICE_ACTIVE', - event_data=nfd_data) - - 
return # Sending LogMeta Details to visibility self._report_logging_info(network_function, nfi_db, @@ -936,9 +897,16 @@ def create_network_function_instance(self, event): nfp_context['network_function_instance'] = nfi_db - LOG.info(_LI("[Event:CreateService]")) - self._create_event('CREATE_NETWORK_FUNCTION_DEVICE', - event_data=nfp_context) + LOG.info(_LI("[Event:CreateService]")), + binding_key = nfp_context['service_chain_node']['id'] + + ev = self._controller.new_event( + id='CREATE_NETWORK_FUNCTION_DEVICE', + data=nfp_context, + binding_key=binding_key, + key=network_function['id'], + serialize=True) + self._controller.post_event(ev) def handle_device_created(self, event): request_data = event.data @@ -1441,7 +1409,7 @@ def handle_user_config_applied(self, event): def handle_config_applied(self, event): nfp_context = event.data['nfp_context'] base_mode = nfp_context['base_mode'] - network_function_id = nfp_context['network_function']['id'] + network_function_id = event.data['network_function_id'] if base_mode: network_function = { 'status': nfp_constants.ACTIVE, @@ -1456,7 +1424,7 @@ def handle_config_applied(self, event): network_function_id}) else: network_function_instance_id = ( - nfp_context['network_function_instance']['id']) + event.data['network_function_instance_id']) if network_function_instance_id: nfi = { 'status': nfp_constants.ACTIVE, diff --git a/gbpservice/nfp/orchestrator/modules/sharing.py b/gbpservice/nfp/orchestrator/modules/sharing.py new file mode 100644 index 0000000000..b10399306e --- /dev/null +++ b/gbpservice/nfp/orchestrator/modules/sharing.py @@ -0,0 +1,117 @@ +from neutron._i18n import _LE +from neutron._i18n import _LI +from gbpservice.nfp.core import event as nfp_event +from gbpservice.nfp.core.event import Event +from gbpservice.nfp.core import module as nfp_api + +from gbpservice.nfp.orchestrator.modules import device_orchestrator +from gbpservice.nfp.common import constants as nfp_constants +from 
gbpservice.nfp.orchestrator.drivers import sharing_driver + +from gbpservice.nfp.core import log as nfp_logging +LOG = nfp_logging.getLogger(__name__) + + +def events_init(controller, config, orchestrator): + events = ['CREATE_NETWORK_FUNCTION_DEVICE'] + events_to_register = [] + for event in events: + events_to_register.append( + Event(id=event, handler=orchestrator)) + controller.register_events( + events_to_register, module='sharing', priority=1) + + +def nfp_module_init(controller, config): + events_init(controller, config, Sharing(controller, config)) + LOG.debug("Service Sharing: module_init") + + +class Sharing(nfp_api.NfpEventHandler): + + def __init__(self, controller, config): + self._controller = controller + self.config = config + self.device_orchestrator = device_orchestrator.DeviceOrchestrator( + controller, config) + self.sharing_driver = sharing_driver.SharingDriver(config) + + def handle_event(self, event): + if event.id == "CREATE_NETWORK_FUNCTION_DEVICE": + self.create_network_function_device(event) + else: + LOG.error(_LE("Invalid event: %(event_id)s for " + "event data %(event_data)s"), + {'event_id': event.id, 'event_data': event.data}) + + def _get_device_to_reuse(self, device_data): + device_filters = { + 'tenant_id': [device_data['tenant_id']], + 'service_vendor': [device_data['service_details'][ + 'service_vendor']], + 'status': [nfp_constants.ACTIVE] + } + devices = self.device_orchestrator._get_network_function_devices( + device_filters) + device = self.sharing_driver.select_network_function_device( + devices, device_data) + return device + + def create_network_function_device(self, event): + nfp_context = event.data + LOG.info(_LI("Orchestrator's sharing module received " + " create network function " + "device request with data %(data)s"), + {'data': nfp_context}) + device_data = ( + self.device_orchestrator._prepare_device_data_from_nfp_context( + nfp_context)) + device = self._get_device_to_reuse(device_data) + if device: + 
device.update(device_data) + # Existing device to be shared + # Trigger an event for Service Orchestrator + device['network_function_device_id'] = device['id'] + # Create an event to NSO, to give device_id + device_created_data = { + 'network_function_instance_id': ( + nfp_context['network_function_instance']['id']), + 'network_function_device_id': device['id'] + } + self.device_orchestrator._create_event( + event_id='DEVICE_CREATED', + event_data=device_created_data) + nfp_context['network_function_device'] = device + nfp_context['vendor_data'] = device['vendor_data'] + management_info = self.sharing_driver.get_managment_info(device) + management = nfp_context['management'] + management['port'] = management_info['neutron_port'] + management['port']['ip_address'] = management_info['ip_address'] + management['subnet'] = management_info['neutron_subnet'] + + # Since the device is already UP, create a GRAPH so that + # further processing continues in device orchestrator + nf_id = nfp_context['network_function']['id'] + nfp_context['event_desc'] = event.desc.to_dict() + du_event = self._controller.new_event(id="DEVICE_UP", + key=nf_id, + data=nfp_context, + graph=True) + + plug_int_event = self._controller.new_event(id="PLUG_INTERFACES", + key=nf_id, + data=nfp_context, + graph=True) + + graph = nfp_event.EventGraph(du_event) + graph.add_node(plug_int_event, du_event) + + graph_event = self._controller.new_event(id="DEVICE_SHARE_GRAPH", + graph=graph) + graph_nodes = [du_event, plug_int_event] + self._controller.post_event_graph(graph_event, graph_nodes) + else: + # Device does not exist. 
+ # Post this event back to device orchestrator + # It will handle as it was handling in non sharing case + self._controller.post_event(event, target='device_orchestrator') diff --git a/gbpservice/nfp/service_plugins/loadbalancer/drivers/__init__.py b/gbpservice/nfp/pecan/__init__.py similarity index 100% rename from gbpservice/nfp/service_plugins/loadbalancer/drivers/__init__.py rename to gbpservice/nfp/pecan/__init__.py diff --git a/gbpservice/nfp/service_plugins/vpn/__init__.py b/gbpservice/nfp/pecan/api/__init__.py similarity index 100% rename from gbpservice/nfp/service_plugins/vpn/__init__.py rename to gbpservice/nfp/pecan/api/__init__.py diff --git a/gbpservice/nfp/base_configurator/api/config.py b/gbpservice/nfp/pecan/api/config.py similarity index 91% rename from gbpservice/nfp/base_configurator/api/config.py rename to gbpservice/nfp/pecan/api/config.py index 442308d9d0..cb09110a3e 100644 --- a/gbpservice/nfp/base_configurator/api/config.py +++ b/gbpservice/nfp/pecan/api/config.py @@ -59,10 +59,10 @@ } } } - -# Custom Configurations must be in Python dictionary format:: -# -# foo = {'bar':'baz'} -# -# All configurations are accessible at:: -# pecan.conf +cloud_services = [ + {'service_name': 'configurator', + 'topic': 'configurator', + 'reporting_interval': '10', # in seconds + 'apis': ['CONFIGURATION'] + } +] diff --git a/gbpservice/nfp/configurator/api/root_controller.py b/gbpservice/nfp/pecan/api/configurator_decider.py similarity index 52% rename from gbpservice/nfp/configurator/api/root_controller.py rename to gbpservice/nfp/pecan/api/configurator_decider.py index a2cc5d4dce..b97c52c38e 100644 --- a/gbpservice/nfp/configurator/api/root_controller.py +++ b/gbpservice/nfp/pecan/api/configurator_decider.py @@ -11,21 +11,20 @@ # under the License. 
import pecan -from v1 import controllers +from gbpservice.nfp.pecan import constants -class RootController(object): -    """This is root controller that forward the request to __init__.py -    file inside controller folder inside v1 -    """ +class DecideConfigurator(pecan.commands.serve.ServeCommand): +    ''' decides the type of configurator to be used +    like base_configurator or reference_configurator +    ''' +    arguments = pecan.commands.serve.ServeCommand.arguments + ({ +        'name': '--mode', +        'help': 'decides the type of configurator to be used', +        'choices': constants.modes, +    },) -    v1 = controllers.V1Controller() - -    @pecan.expose() -    def get(self): -        # TODO(blogan): once a decision is made on how to do versions, do that -        # here -        return {'versions': [{'status': 'CURRENT', -                              'updated': '2014-12-11T00:00:00Z', -                              'id': 'v1'}]} +    def run(self, args): +        setattr(pecan, 'mode', args.mode) +        super(DecideConfigurator, self).run(args) diff --git a/gbpservice/tests/contrib/nfp_service/reference_configurator/api/root_controller.py b/gbpservice/nfp/pecan/api/root_controller.py similarity index 54% rename from gbpservice/tests/contrib/nfp_service/reference_configurator/api/root_controller.py rename to gbpservice/nfp/pecan/api/root_controller.py index 5407e27222..6b4ce09902 100644 --- a/gbpservice/tests/contrib/nfp_service/reference_configurator/api/root_controller.py +++ b/gbpservice/nfp/pecan/api/root_controller.py @@ -12,7 +12,7 @@ import pecan -from v1 import controllers +from gbpservice.nfp.pecan import constants class RootController(object): @@ -20,13 +20,26 @@ class RootController(object): file inside controller folder inside v1 """ +    _controllers = {} -    v1 = controllers.V1Controller() +    for name, controller in constants.controllers.items(): +        try: +            _controllers.update({name: __import__(controller, +                                                  globals(), +                                                  locals(), +                                                  ['controllers'], -1)}) +        except Exception: +            pass + +    if pecan.mode == constants.base: +        v1 = _controllers[constants.BASE_CONTROLLER].V1Controller() +    elif 
pecan.mode == constants.base_with_vm: + v1 = _controllers[constants.REFERENCE_CONTROLLER].V1Controller() + elif pecan.mode == constants.advanced: + v1 = _controllers[constants.ADVANCED_CONTROLLER].V1Controller() @pecan.expose() def get(self): - # TODO(blogan): once a decision is made on how to do versions, do that - # here return {'versions': [{'status': 'CURRENT', 'updated': '2014-12-11T00:00:00Z', 'id': 'v1'}]} diff --git a/gbpservice/tests/contrib/nfp_service/reference_configurator/api/setup.py b/gbpservice/nfp/pecan/api/setup.py similarity index 74% rename from gbpservice/tests/contrib/nfp_service/reference_configurator/api/setup.py rename to gbpservice/nfp/pecan/api/setup.py index e6042b44f2..1e84d2d961 100644 --- a/gbpservice/tests/contrib/nfp_service/reference_configurator/api/setup.py +++ b/gbpservice/nfp/pecan/api/setup.py @@ -10,7 +10,6 @@ # License for the specific language governing permissions and limitations # under the License. -# -*- coding: utf-8 -*- try: import setuptools except ImportError: @@ -30,5 +29,11 @@ test_suite='api', zip_safe=False, include_package_data=True, - packages=setuptools.find_packages(exclude=['ez_setup']) + packages=setuptools.find_packages(exclude=['ez_setup']), + # Having entry point gives the option to define custom classes + # to improve the flexibility in accessing different configurators + entry_points=""" + [pecan.command] + configurator_decider = configurator_decider:DecideConfigurator + """ ) diff --git a/gbpservice/nfp/service_plugins/vpn/drivers/__init__.py b/gbpservice/nfp/pecan/api/v1/__init__.py similarity index 100% rename from gbpservice/nfp/service_plugins/vpn/drivers/__init__.py rename to gbpservice/nfp/pecan/api/v1/__init__.py diff --git a/gbpservice/tests/contrib/nfp_service/reference_configurator/api/v1/app.py b/gbpservice/nfp/pecan/api/v1/app.py similarity index 99% rename from gbpservice/tests/contrib/nfp_service/reference_configurator/api/v1/app.py rename to gbpservice/nfp/pecan/api/v1/app.py index 
f65e34406b..3dcb5ea236 100644 --- a/gbpservice/tests/contrib/nfp_service/reference_configurator/api/v1/app.py +++ b/gbpservice/nfp/pecan/api/v1/app.py @@ -16,7 +16,6 @@ def setup_app(config): app_conf = dict(config.app) - return pecan.make_app( app_conf.pop('root'), logging=getattr(config, 'logging', {}), diff --git a/gbpservice/nfp/base_configurator/api/base_controller.py b/gbpservice/nfp/pecan/base_controller.py similarity index 100% rename from gbpservice/nfp/base_configurator/api/base_controller.py rename to gbpservice/nfp/pecan/base_controller.py diff --git a/gbpservice/nfp/pecan/constants.py b/gbpservice/nfp/pecan/constants.py new file mode 100644 index 0000000000..b04b622ed7 --- /dev/null +++ b/gbpservice/nfp/pecan/constants.py @@ -0,0 +1,30 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +BASE_CONTROLLER = 'base_controller' +REFERENCE_CONTROLLER = 'reference_controller' +ADVANCED_CONTROLLER = 'advanced_controller' + + +controllers = { + BASE_CONTROLLER: 'gbpservice.nfp.base_configurator.controllers', + REFERENCE_CONTROLLER: ('gbpservice.tests.contrib' + '.nfp_service.reference_configurator.controllers'), + ADVANCED_CONTROLLER: ('gbpservice.contrib.nfp.configurator' + '.advanced_controller.controller_loader') +} + +base_with_vm = 'base_with_vm' +base = 'base' +advanced = 'advanced' +modes = [base, base_with_vm, advanced] diff --git a/gbpservice/nfp/scripts/README.txt b/gbpservice/nfp/scripts/README.txt new file mode 100644 index 0000000000..6e180b4e2d --- /dev/null +++ b/gbpservice/nfp/scripts/README.txt @@ -0,0 +1,35 @@ + + +Steps to shift from NFP to NSD: +=============================== + +Pre-requisite: +-------------- +NFP should be installed on the setup by following instructions from +gbpservice/devstack/Readme-NFP-install.txt + +Steps: +------ +(1) Get the enterprise source + # ENTERPRISE_BRANCH=mitaka_21st_march_base + # git clone -b $ENTERPRISE_BRANCH --single-branch https://github.com/oneconvergence/group-based-policy.git /home/stack/gbp_$ENTERPRISE_BRANCH + +(2) Configure the /home/stack/gbp_$ENTERPRISE_BRANCH/gbpservice/nfp/config/mode_shift.conf + Specify the path where the devstack git code is cloned. + # DEVSTACK_SRC_DIR= + + Specify the following details of visibility + # VISIBILITY_GIT_BRANCH=master + # GIT_ACCESS_USERNAME= + # GIT_ACCESS_PASSWORD= + # DOCKER_IMAGES_URL=http://192.168.100.50/docker_images/ + + Specification of the following image location is optional. If specified, + these images will be uploaded to Openstack glance. Otherwise, user has + to manually upload these images. + # AsavQcow2Image= + # PaloAltoQcow2Image= + +(3) Execute the script. 
+ # cd /home/stack/gbp_$ENTERPRISE_BRANCH/gbpservice/nfp/scripts/ + # bash mode_shift.sh diff --git a/gbpservice/nfp/scripts/mode_shift.sh b/gbpservice/nfp/scripts/mode_shift.sh index 35b079dcf9..1c924bfec2 100644 --- a/gbpservice/nfp/scripts/mode_shift.sh +++ b/gbpservice/nfp/scripts/mode_shift.sh @@ -1,152 +1,424 @@ #! /bin/bash -source /opt/stack/gbp/gbpservice/nfp/config/mode_shift.conf - -DEVSTACK_DIR=/home/stack/devstack -source $DEVSTACK_DIR/local.conf -NFPSERVICE_DIR=/opt/stack/gbp -# TODO(DEEPAK): Should be retrieved from a result file populated by advanced mode. +SCRIPT_DIR=$PWD +ENTERPRISE_NFPSERVICE_DIR=$SCRIPT_DIR/../../../ +source $SCRIPT_DIR/../config/mode_shift.conf +source $DEVSTACK_SRC_DIR/local.conf +INSTALLED_NFPSERVICE_DIR=$DEST/gbp +# BUGBUG(DEEPAK): Should be retrieved from a result file populated by advanced mode. EXT_NET_NAME=ext-net -function create_port_for_vm { - image_name=$1 +function setup_ssh_key { + cd $SCRIPT_DIR + sudo ssh-keygen -f "/root/.ssh/known_hosts" -R $configurator_ip + sudo ssh-keygen -f configurator_vm -t rsa -N '' + echo "Give the password for the root user of the Configurator VM when prompted." 
+ sleep 5 + cat configurator_vm.pub |\ + sudo ip netns exec nfp-proxy\ + ssh -o "StrictHostKeyChecking no" root@$configurator_ip\ + 'cat >> .ssh/authorized_keys' + sleep 5 +} - GROUP="svc_management_ptg" - echo "GroupName: $GROUP" - PortId=$(gbp policy-target-create --policy-target-group $GROUP $InstanceName | grep port_id | awk '{print $4}') +function copy_files { + cd $SCRIPT_DIR + + # Copy gbpservice/nfp from enterprise source + sudo cp -r\ + $ENTERPRISE_NFPSERVICE_DIR/gbpservice/nfp\ + $INSTALLED_NFPSERVICE_DIR/gbpservice/ + + # Copy gbpservice/contrib/nfp from enterprise source + sudo cp -r\ + $ENTERPRISE_NFPSERVICE_DIR/gbpservice/contrib/nfp\ + $INSTALLED_NFPSERVICE_DIR/gbpservice/contrib/ + + # Copy to Configurator from enterprise source + sudo ip netns exec nfp-proxy\ + ssh -o "StrictHostKeyChecking no" -i configurator_vm root@$configurator_ip\ + mkdir /enterprise_src + + sudo ip netns exec nfp-proxy\ + scp -o "StrictHostKeyChecking no" -i configurator_vm -r\ + $ENTERPRISE_NFPSERVICE_DIR/gbpservice/nfp\ + root@$configurator_ip:/enterprise_src/ + sudo ip netns exec nfp-proxy\ + ssh -o "StrictHostKeyChecking no" -i configurator_vm root@$configurator_ip\ + docker cp\ + /enterprise_src/nfp\ + configurator:/usr/local/lib/python2.7/dist-packages/gbpservice/ + + sudo ip netns exec nfp-proxy\ + scp -o "StrictHostKeyChecking no" -i configurator_vm -r\ + $ENTERPRISE_NFPSERVICE_DIR/gbpservice/contrib/nfp\ + root@$configurator_ip:/enterprise_src/contrib_nfp + sudo ip netns exec nfp-proxy\ + ssh -o "StrictHostKeyChecking no" -i configurator_vm root@$configurator_ip\ + docker cp\ + /enterprise_src/contrib_nfp\ + configurator:/usr/local/lib/python2.7/dist-packages/gbpservice/contrib/ + sudo ip netns exec nfp-proxy\ + ssh -o "StrictHostKeyChecking no" -i configurator_vm root@$configurator_ip\ + docker exec configurator\ + rm -rf /usr/local/lib/python2.7/dist-packages/gbpservice/contrib/nfp + sudo ip netns exec nfp-proxy\ + ssh -o "StrictHostKeyChecking no" -i 
configurator_vm root@$configurator_ip\ + docker exec configurator\ + mv /usr/local/lib/python2.7/dist-packages/gbpservice/contrib/contrib_nfp\ + /usr/local/lib/python2.7/dist-packages/gbpservice/contrib/nfp + + sudo ip netns exec nfp-proxy\ + ssh -o "StrictHostKeyChecking no" -i configurator_vm root@$configurator_ip\ + docker exec configurator\ + cp -r /usr/local/lib/python2.7/dist-packages/gbpservice/contrib/nfp/configurator/config /etc/nfp_config +} + +function update_db { + # Updates the DB model + db_name=nfp_enterprise_db + gbp-db-manage --config-file /etc/neutron/neutron.conf revision -m "$db_name" + + revision=$(sed -n '/revision = /p'\ + $INSTALLED_NFPSERVICE_DIR/gbpservice/neutron/db/migration/alembic_migrations/versions/*$db_name.py |\ + awk 'NR==1{print $3}') + down_revision=$(sed -n '/revision = /p'\ + $INSTALLED_NFPSERVICE_DIR/gbpservice/neutron/db/migration/alembic_migrations/versions/*$db_name.py |\ + awk 'NR==2{print $3}') + + sed -i -e "s/revision = *.*/revision = $revision/" \ + -e "s/down_revision = *.*/down_revision = $down_revision/" \ + -e "s/Revision ID:*.*/Revision ID: "$(echo $revision | tr -d "'")"/" \ + -e "s/Revises:*.*/Revises: "$(echo $down_revision | tr -d "'")"/"\ + $ENTERPRISE_NFPSERVICE_DIR/gbpservice/nfp/orchestrator/db/enterprise_migration/$db_name.py + + # The sleep here is necessary as we need to give db migration script + # some time for processing the new revision file. 
+ sleep 5 + + sudo cp\ + $ENTERPRISE_NFPSERVICE_DIR/gbpservice/nfp/orchestrator/db/enterprise_migration/$db_name.py\ + $INSTALLED_NFPSERVICE_DIR/gbpservice/neutron/db/migration/alembic_migrations/versions/*$db_name.py + + gbp-db-manage --config-file /etc/neutron/neutron.conf upgrade head +} + +function nfp_configure_nova { + NOVA_CONF_DIR=/etc/nova + NOVA_CONF=$NOVA_CONF_DIR/nova.conf + source $DEVSTACK_SRC_DIR/inc/ini-config + iniset $NOVA_CONF DEFAULT instance_usage_audit "True" + + for proc in n-cpu n-cond n-sch n-novnc n-cauth n-api; do + # Can be used to run the binary in a specific environment + # A silly example will be 'watch free -m' where watch is the + # sandbox and free is the proc + sandbox= + param=--config-file\ /etc/nova/nova.conf + # Multiple config files can be given as space separated + # e.g.: --config-file \ --config-file\ + extra_param= + case $proc in + n-cpu) + sandbox=sg\ libvirtd + proc_name=nova-compute + ;; + n-cond) + proc_name=nova-conductor + ;; + n-sch) + proc_name=nova-scheduler + ;; + n-novnc) + proc_name=nova-novncproxy + extra_param=--web\ /opt/stack/noVNC + ;; + n-cauth) + proc_name=nova-consoleauth + ;; + n-api) + proc_name=nova-api + param= + ;; + esac + restart_devstack_screen_processes "$proc" "$sandbox" "$proc_name" "$param" "$extra_param" + done +} + +function restart_devstack_screen_processes { + SCREEN_NAME=stack + SERVICE_DIR=$DEST/status/$SCREEN_NAME + bin=/usr/local/bin + proc_screen_name=$1 + sandbox=$2 + proc_name=$3 + param=$4 + extra_param=$5 + + cmd=$bin/$proc_name\ $param\ $extra_param + cmd="$(echo -e "${cmd}" | sed -e 's/[[:space:]]*$//')" + + if [[ ! 
-z "${sandbox// }" ]]; then + cmd=$sandbox\ \'$cmd\' + fi - echo "Getting IpAddr for port: $PortId" - IpAddr_extractor=`neutron port-list|grep $PortId|awk '{print $11}'` + # stop the process + screen -S $SCREEN_NAME -p $proc_screen_name -X kill + sleep 4 + + # start the process + screen -S $SCREEN_NAME -X screen -t $proc_screen_name + screen -S $SCREEN_NAME -p $proc_screen_name -X stuff "$cmd \ + & echo \$! >$SERVICE_DIR/${proc_screen_name}.pid; fg || \ + echo \"$proc_screen_name failed to start\" \ + | tee \"$SERVICE_DIR/${proc_screen_name}.failure\"\n" + sleep 5 +} + +function create_port_for_vm { +# $1 is image_name +# $2 is instance name + GROUP="svc_management_ptg" + PortId=$(gbp policy-target-create --policy-target-group $GROUP $2 | grep port_id | awk '{print $4}') + IpAddr_extractor=`neutron port-list --format value | grep $PortId | awk '{print $7}'` IpAddr_purge_last=${IpAddr_extractor::-1} IpAddr=${IpAddr_purge_last//\"/} - echo "Collecting IpAddr : for $PortId" - echo $IpAddr + echo "IpAddr of port($PortId): $IpAddr" + visibility_image_name=$1 + visibility_port_id=$PortId + visibility_ip=$IpAddr } function configure_vis_ip_addr_in_docker { - echo "Visibility VM IP address is: $IpAddr" - sed -i "s/VIS_VM_IP_ADDRESS/"$IpAddr"/" $NFPSERVICE_DIR/gbpservice/nfp/configurator/Dockerfile + cd $SCRIPT_DIR + sudo ip netns exec nfp-proxy\ + ssh -o "StrictHostKeyChecking no" -i configurator_vm root@$configurator_ip\ + docker exec configurator\ + sed -i "s/log_forward_ip_address=*.*/log_forward_ip_address=$visibility_ip/" /etc/nfp_configurator.ini } function create_images { - source $DEVSTACK_DIR/openrc neutron service - unset OS_USER_DOMAIN_ID - unset OS_PROJECT_DOMAIN_ID - # prepare visibility image and upload it into glance VISIBILITY_QCOW2_IMAGE=${VISIBILITY_QCOW2_IMAGE:-build} VISIBILITY_QCOW2_IMAGE_NAME=visibility InstanceName="VisibilityVM_instance" - create_port_for_vm $VISIBILITY_QCOW2_IMAGE_NAME + create_port_for_vm $VISIBILITY_QCOW2_IMAGE_NAME $InstanceName 
+ # edits the docker file to add visibility vm IP address + configure_vis_ip_addr_in_docker if [[ $VISIBILITY_QCOW2_IMAGE = build ]]; then - # edits the docker file to add visibility vm IP address - configure_vis_ip_addr_in_docker - # prepare visibility source, this is needed for diskimage build cd /home/stack/ sudo rm -rf visibility - sudo git clone https://$GIT_ACCESS_USERNAME:$GIT_ACCESS_PASSWORD@github.com/oneconvergence/visibility.git -b $VISIBILITY_GIT_BRANCH + sudo git clone\ + https://$GIT_ACCESS_USERNAME:$GIT_ACCESS_PASSWORD@github.com/oneconvergence/visibility.git\ + -b $VISIBILITY_GIT_BRANCH echo "Building Image: $VISIBILITY_QCOW2_IMAGE_NAME" - cd $DEST/gbp/gbpservice/tests/contrib/diskimage-create/ - sudo python visibility_disk_image_create.py visibility_conf.json $DEVSTACK_DIR/local.conf - VISIBILITY_QCOW2_IMAGE=$(cat /tmp/image_path) + cd $ENTERPRISE_NFPSERVICE_DIR/gbpservice/tests/contrib/diskimage-create/ + sudo python visibility_disk_image_create.py\ + visibility_conf.json $GBPSERVICE_BRANCH $DOCKER_IMAGES_URL + VISIBILITY_QCOW2_IMAGE=$(cat output/last_built_image_path) fi echo "Uploading Image: $VISIBILITY_QCOW2_IMAGE_NAME" - glance image-create --name $VISIBILITY_QCOW2_IMAGE_NAME --disk-format qcow2 --container-format bare --visibility public --file $VISIBILITY_QCOW2_IMAGE + glance image-create\ + --name $VISIBILITY_QCOW2_IMAGE_NAME\ + --disk-format qcow2\ + --container-format bare\ + --visibility public\ + --file $VISIBILITY_QCOW2_IMAGE sleep 4 if ! 
[[ -z $AsavQcow2Image ]]; then - gbp service-profile-create --servicetype FIREWALL --insertion-mode l3 --shared True --service-flavor service_vendor=asav,device_type=nova --vendor NFP asav_fw_profile + gbp service-profile-create\ + --servicetype FIREWALL\ + --insertion-mode l3\ + --shared True\ + --service-flavor service_vendor=asav,device_type=nova\ + --vendor NFP\ + asav_fw_profile ASAV_QCOW2_IMAGE_NAME=asav echo "Uploading Image: $ASAV_QCOW2_IMAGE_NAME" - glance image-create --name $ASAV_QCOW2_IMAGE_NAME --disk-format qcow2 --container-format bare --visibility public --file $AsavQcow2Image + glance image-create\ + --name $ASAV_QCOW2_IMAGE_NAME\ + --disk-format qcow2\ + --container-format bare\ + --visibility public\ + --file $AsavQcow2Image + fi + + if ! [[ -z $PaloAltoQcow2Image ]]; then + PALO_ALTO_QCOW2_IMAGE_NAME=paloalto + echo "Uploading Image: $PALO_ALTO_QCOW2_IMAGE_NAME" + glance image-create\ + --name $PALO_ALTO_QCOW2_IMAGE_NAME\ + --disk-format qcow2\ + --container-format bare\ + --visibility public\ + --file $PaloAltoQcow2Image fi } -function nfp_configure_nova { - NOVA_CONF_DIR=/etc/nova - NOVA_CONF=$NOVA_CONF_DIR/nova.conf - source $DEVSTACK_DIR/inc/ini-config - iniset $NOVA_CONF DEFAULT instance_usage_audit "True" - - source $DEVSTACK_DIR/functions-common - stop_process n-cpu - stop_process n-cond - stop_process n-sch - stop_process n-novnc - stop_process n-cauth - stop_process n-api - - source $DEVSTACK_DIR/lib/nova - start_nova_compute - start_nova_api - run_process n-cond "$NOVA_BIN_DIR/nova-conductor --config-file $NOVA_CONF" - run_process n-sch "$NOVA_BIN_DIR/nova-scheduler --config-file $NOVA_CONF" - run_process n-novnc "$NOVA_BIN_DIR/nova-novncproxy --config-file $NOVA_CONF --web $DEST/noVNC" - run_process n-cauth "$NOVA_BIN_DIR/nova-consoleauth --config-file $NOVA_CONF" +function configure_visibility_user_data { +# $1 is the Visibility VM's IP address + CUR_DIR=$PWD + visibility_vm_ip=$1 + sudo rm -rf /opt/visibility_user_data + sudo cp -r 
$ENTERPRISE_NFPSERVICE_DIR/devstack/exercises/nfp_service/user-data/visibility_user_data /opt/. + cd /opt + sudo rm -rf my.key my.key.pub + sudo ssh-keygen -t rsa -N "" -f my.key + value=`sudo cat my.key.pub` + sudo echo $value + sudo sed -i "s||${value}|" visibility_user_data + sudo sed -i "s/visibility_vm_ip=*.*/visibility_vm_ip=$visibility_vm_ip/g" visibility_user_data + sudo sed -i "s/os_controller_ip=*.*/os_controller_ip=$HOST_IP/g" visibility_user_data + sudo sed -i "s/statsd_host=*.*/statsd_host=$visibility_vm_ip/g" visibility_user_data + sudo sed -i "s/rabbit_host=*.*/rabbit_host=$configurator_ip/g" visibility_user_data + cd $CUR_DIR } -function prepare_for_mode_shift { - if [[ $FROM = advanced ]] && [[ $TO = enterprise ]]; then - source $DEST/gbp/devstack/lib/nfp +function attach_security_groups { + SecGroup="allow_all" + nova secgroup-create $SecGroup "allow all traffic" + nova secgroup-add-rule $SecGroup udp 1 65535 120.0.0.0/24 + nova secgroup-add-rule $SecGroup icmp -1 -1 120.0.0.0/24 + nova secgroup-add-rule $SecGroup tcp 1 65535 120.0.0.0/24 + nova secgroup-add-rule $SecGroup tcp 80 80 0.0.0.0/0 + nova secgroup-add-rule $SecGroup udp 514 514 0.0.0.0/0 + nova secgroup-add-rule $SecGroup tcp 443 443 0.0.0.0/0 - echo "Preparing image creation" - create_images - nfp_configure_nova - sleep 10 - echo "Launching the Visibility VM" - launch_visibilityVM + nova add-secgroup $InstanceName $SecGroup +} - nfp_logs_forword +function launch_visibilityVM { + neutron net-create visibility-network + neutron subnet-create visibility-network 188.0.0.0/24 --name visibility-subnet + neutron router-create visibility-router + neutron router-gateway-set visibility-router $EXT_NET_NAME + neutron router-interface-add visibility-router visibility-subnet + ExtPortId=$(neutron port-create visibility-network | grep ' id ' | awk '{print $4}') + fip_id=$(neutron floatingip-create $EXT_NET_NAME | grep ' id '| awk '{print $4}') + neutron floatingip-associate $fip_id $ExtPortId + 
IpAddr_extractor=`neutron port-list --format value|grep $ExtPortId|awk '{print $6}'` + IpAddr_purge_last=${IpAddr_extractor::-1} + IpAddr2=${IpAddr_purge_last//\"/} + echo "Collecting IpAddr : for $ExtPortId" + echo $IpAddr2 + + echo "Collecting ImageId : for $visibility_image_name" + ImageId=`glance image-list|grep $visibility_image_name |awk '{print $2}'` + if [ ! -z "$ImageId" -a "$ImageId" != " " ]; then + echo $ImageId else - echo "Shifting from $FROM mode to $TO mode is not supported." + echo "No image found with name $visibility_image_name ..." + exit fi + + configure_visibility_user_data $visibility_ip + echo "Launching Visibility image" + nova boot\ + --image $ImageId\ + --flavor m1.xlarge\ + --user-data /opt/visibility_user_data\ + --nic port-id=$visibility_port_id\ + --nic port-id=$ExtPortId\ + $InstanceName + sleep 10 + attach_security_groups } -function delete_instance_and_image { - - # delete the instance - echo "Deleting the running '$2' instance." - nova delete $2 +function nfp_logs_forword { + VISIBILITY_CONF="/etc/rsyslog.d/visibility.conf" + SYSLOG_CONFIG="/etc/rsyslog.conf" + log_facility=local1 + + sudo sed -i '/#$ModLoad imudp/ s/^#//' $SYSLOG_CONFIG + sudo sed -i '/#$UDPServerRun 514/ s/^#//' $SYSLOG_CONFIG + echo "Successfully enabled UDP in syslog" + + visibility_vm_ip_address=$(neutron floatingip-list --format value | grep "$IpAddr2" | awk '{print $3}') + echo "$log_facility.* @$visibility_vm_ip_address:514" | sudo tee $VISIBILITY_CONF + echo "Created $VISIBILITY_CONF file" + + sudo service rsyslog restart + if [ $? -ne 0 ]; then + echo "ERROR: Failed to restart rsyslog" + fi +} + +function restart_screen_process { + SCREEN_NAME=stack + SERVICE_DIR=$DEST/status + name=$1 + cmd=$2 + + # stop the process + screen -S $SCREEN_NAME -p $name -X kill + + sleep 2 + + # start the process + screen -S $SCREEN_NAME -X screen -t $name + screen -S $SCREEN_NAME -p $name -X stuff "$cmd & echo \$! 
>$SERVICE_DIR/$SCREEN_NAME/${name}.pid; fg || echo \"$name failed to start\" | tee \"$SERVICE_DIR/$SCREEN_NAME/${name}.failure\"\n" + sleep 5 - - echo "Deleting '$1' glance image." - image_id=$(glance image-list | grep $1 | awk '{print $2}') - glance image-delete $image_id } - function restart_processes { - source $DEVSTACK_DIR/functions-common - source $DEVSTACK_DIR/openrc neutron service - - # restart proxy - stop_process proxy - run_process proxy "source $NFPSERVICE_DIR/devstack/lib/nfp;namespace_delete $DEVSTACK_DIR;namespace_create $DEVSTACK_DIR $IpAddr" - echo "Restarted proxy process" - sleep 10 + cd $SCRIPT_DIR + + restart_screen_process nfp_orchestrator "sudo /usr/bin/nfp --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini --config-file /etc/nfp_orchestrator.ini --log-file $DEST/logs/nfp_orchestrator.log" - # restart proxy agent - stop_process proxy_agent - run_process proxy_agent "sudo /usr/bin/nfp --config-file /etc/nfp_proxy_agent.ini --log-file /opt/stack/logs/nfp_proxy_agent.log" - echo "Restarted proxy agent process" - sleep 3 + # restart_screen_process nfp_proxy_agent "sudo /usr/bin/nfp --config-file /etc/nfp_proxy_agent.ini --log-file $DEST/logs/nfp_proxy_agent.log" + # restart_screen_process nfp_proxy "source $INSTALLED_NFPSERVICE_DIR/devstack/lib/nfp; namespace_delete; namespace_create" + + restart_screen_process nfp_config_orchestrator "sudo /usr/bin/nfp --config-file /etc/nfp_config_orch.ini --config-file /etc/neutron/neutron.conf --log-file $DEST/logs/nfp_config_orchestrator.log" + + # restart nfp_configurator + sudo ip netns exec nfp-proxy\ + ssh -o "StrictHostKeyChecking no" -i configurator_vm root@$configurator_ip\ + docker exec configurator screen -S configurator -X quit + sudo ip netns exec nfp-proxy\ + ssh -o "StrictHostKeyChecking no" -i configurator_vm root@$configurator_ip\ + docker exec configurator screen -dmS "configurator" /usr/bin/python2 /usr/bin/nfp --config-file=/etc/nfp_configurator.ini 
--config-dir=/etc/nfp_config --log-file=/var/log/nfp/nfp_configurator.log } +function prepare_for_mode_shift { + if [[ $FROM = advanced ]] && [[ $TO = enterprise ]]; then + source $DEVSTACK_SRC_DIR/openrc neutron service + unset OS_USER_DOMAIN_ID + unset OS_PROJECT_DOMAIN_ID + + # BUGBUG(RPM): Configurator's port name should be retrieved from a result file populated by advanced mode. + configurator_ip=`neutron port-show pt_configuratorVM_instance -f value -c fixed_ips | cut -d'"' -f8` + echo "Configurator's IP: $configurator_ip" + + echo "Setting up ssh key in configurator for password less ssh" + setup_ssh_key + echo "Copy files and configure" + copy_files + update_db + + echo "Configuring nova" + nfp_configure_nova + sleep 10 + + echo "Preparing image creation" + create_images + echo "Launching the Visibility VM" + launch_visibilityVM + nfp_logs_forword + else + echo "Shifting from $FROM mode to $TO mode is not supported." + fi +} function mode_shift { if [[ $FROM = advanced ]] && [[ $TO = enterprise ]]; then - sudo sed -i 's/rest_server_address=.*/rest_server_address='$IpAddr'/' /etc/nfp_proxy.ini - echo "Restarting various processes" restart_processes - - image=configurator - instance_name=configuratorVM_instance - delete_instance_and_image $image $instance_name else echo "Shifting from $FROM mode to $TO mode is not supported." fi @@ -158,7 +430,7 @@ echo "Task: Shifting mode of NFP from $FROM mode to $TO mode." echo "Preparing for the NFP mode shift." prepare_for_mode_shift -echo "Shifting NFP to $TO mode. There will be a little downtime. Kindly bear with me." +echo "Shifting NFP to $TO mode. There will be a little downtime. Kindly bear with it." mode_shift echo "Successfully shifted NFP from $FROM mode to $TO mode." 
diff --git a/gbpservice/nfp/service_plugins/loadbalancer/drivers/nfp_lbaas_plugin_driver.py b/gbpservice/nfp/service_plugins/loadbalancer/drivers/nfp_lbaas_plugin_driver.py deleted file mode 100644 index 96b7c524d3..0000000000 --- a/gbpservice/nfp/service_plugins/loadbalancer/drivers/nfp_lbaas_plugin_driver.py +++ /dev/null @@ -1,19 +0,0 @@ -from gbpservice.nfp.config_orchestrator.common import topics -from gbpservice.nfp.configurator.drivers.loadbalancer.v1.haproxy import ( - haproxy_lb_driver -) -from neutron_lbaas.services.loadbalancer.drivers.common import ( - agent_driver_base as adb -) - - -class HaproxyOnVMPluginDriver(adb.AgentDriverBase): - device_driver = haproxy_lb_driver.DRIVER_NAME - - def __init__(self, plugin): - # Monkey patch LB agent topic and LB agent type - adb.l_const.LOADBALANCER_AGENT = topics.LB_NFP_CONFIGAGENT_TOPIC - adb.q_const.AGENT_TYPE_LOADBALANCER = 'NFP Loadbalancer agent' - - super(HaproxyOnVMPluginDriver, self).__init__(plugin) - diff --git a/gbpservice/nfp/service_vendor_agents/haproxy/haproxy-agent/etc/dhcp/dhclient-exit-hooks.d/haproxy_routing b/gbpservice/nfp/service_vendor_agents/haproxy/haproxy-agent/etc/dhcp/dhclient-exit-hooks.d/haproxy_routing index d3ca20072a..741e53de29 100644 --- a/gbpservice/nfp/service_vendor_agents/haproxy/haproxy-agent/etc/dhcp/dhclient-exit-hooks.d/haproxy_routing +++ b/gbpservice/nfp/service_vendor_agents/haproxy/haproxy-agent/etc/dhcp/dhclient-exit-hooks.d/haproxy_routing @@ -1,7 +1,6 @@ #!/bin/bash #set -x -#echo "interface : $interface" -#echo "new routers : $new_routers" + #Converts decimal to ip address function dec2ip () { local ip dec=$1 @@ -26,16 +25,28 @@ function ip2dec () { all_interfaces=`ifconfig | grep "Link encap" | grep -v "127.0.0.1" |cut -d: -d " " -f 1`; index=0; for i in $all_interfaces; do - if [ "$i" != "lo" ] #&& [ "$i" != "eth0" ] - #if [ "$i" == "$interface" ] + if [ "$i" != "lo" ] then - ip_addr=`ip addr show $i | grep "inet " | grep -v "/32" | awk -F'[/ ]+' '{ print $3 
}'` + interface_ip_configure_time=30 + while [[ $interface_ip_configure_time -gt 0 ]]; do + ip_addr=`ip addr show $i | grep "inet " | grep -v "/32" | awk -F'[/ ]+' '{ print $3 }'` + if [[ -z $ip_addr ]]; then + sleep 5 + else + break + fi + ((interface_ip_configure_time-=5)) + done + if [[ -z $ip_addr ]]; then + echo "Hotplugged interface $i doesn't have an IP address. Hence, EXITING!!!" >&2 + exit 1 + fi + bcast_ip=`ip addr show $i | grep "inet " | grep -v "/32" | awk -F'[/ ]+' '{ print $6 }'`; filename="/var/lib/dhcp/dhclient."$i".leases" if [ "$i" == "$interface" ] then - #mask=`grep "option subnet-mask" $filename |tail -1| awk -F'[; ]+' '{ print $4 }'` mask=$new_subnet_mask else mask=`grep "option subnet-mask" $filename |tail -1| awk -F'[; ]+' '{ print $4 }'` @@ -72,7 +83,6 @@ for i in $all_interfaces; do fi index=`expr $index + 1` - #echo $net_ip_with_mask $i $mask_num `ip route flush dev $i` `ip route add $net_ip_with_mask dev $i` @@ -93,8 +103,6 @@ echo ${default_route[*]} index=0 echo $interface for interface in ${interfaces[*]}; do - #if [ "$interface1" == "$interface" ] - #then table_name=$interface"_table" mask_num=`ip addr show dev $interface | grep -m2 "inet " | grep -v "/32" | awk -F'[: ]+' '{ print $3 }'|cut -d '/' -f 2` net_ip_with_mask="${network_id[$index]}/${mask_num}" @@ -113,6 +121,5 @@ for interface in ${interfaces[*]}; do fi `ip rule add from ${ip_addresses[$index]} table $table_name` index=`expr $index + 1` - #fi done diff --git a/gbpservice/nfp/service_vendor_agents/vyos/LICENSE README b/gbpservice/nfp/service_vendor_agents/vyos/LICENSE README deleted file mode 100644 index 81526a2dc4..0000000000 --- a/gbpservice/nfp/service_vendor_agents/vyos/LICENSE README +++ /dev/null @@ -1,2 +0,0 @@ -configsession.py and utils.py are opens source files and originally taken from -"https://github.com/abessifi/pyatta". 
\ No newline at end of file diff --git a/gbpservice/nfp/service_vendor_agents/vyos/agent/DEBIAN/changelog b/gbpservice/nfp/service_vendor_agents/vyos/agent/DEBIAN/changelog new file mode 100644 index 0000000000..c58aa95e26 --- /dev/null +++ b/gbpservice/nfp/service_vendor_agents/vyos/agent/DEBIAN/changelog @@ -0,0 +1,17 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +vyos (2.0) INITIAL RELEASE; urgency=low + + * Supports firewall and VPN + + -- One Convergence Wed, 13 Jul 2016 05:20:46 +0530 diff --git a/gbpservice/nfp/service_vendor_agents/vyos/agent/DEBIAN/control b/gbpservice/nfp/service_vendor_agents/vyos/agent/DEBIAN/control new file mode 100644 index 0000000000..f8e7d09950 --- /dev/null +++ b/gbpservice/nfp/service_vendor_agents/vyos/agent/DEBIAN/control @@ -0,0 +1,8 @@ +Package: vyos +Source: vyos +Version: 2.0 +Architecture: all +Maintainer: One Convergence +Section: devel +Priority: optional +Description: Vyos package that supports Firewall and VPN diff --git a/gbpservice/nfp/configurator/api/setup.py b/gbpservice/nfp/service_vendor_agents/vyos/agent/DEBIAN/postinst old mode 100644 new mode 100755 similarity index 56% rename from gbpservice/nfp/configurator/api/setup.py rename to gbpservice/nfp/service_vendor_agents/vyos/agent/DEBIAN/postinst index e6042b44f2..78055a213c --- a/gbpservice/nfp/configurator/api/setup.py +++ b/gbpservice/nfp/service_vendor_agents/vyos/agent/DEBIAN/postinst @@ -1,3 +1,5 @@ +#!/bin/bash + # Licensed under the Apache License, 
Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at @@ -10,25 +12,12 @@ # License for the specific language governing permissions and limitations # under the License. -# -*- coding: utf-8 -*- -try: - import setuptools -except ImportError: - import ez_setup - ez_setup.use_setuptools() - import setuptools - -setuptools.setup( - name='api', - version='0.1', - description='', - author='', - author_email='', - install_requires=[ - "pecan", - ], - test_suite='api', - zip_safe=False, - include_package_data=True, - packages=setuptools.find_packages(exclude=['ez_setup']) -) +sudo chown -R root:vyattacfg /config/auth/ +sudo chown -R root:vyattacfg /config/scripts/ +sudo update-rc.d vyos defaults +sudo mkdir -p /var/log/vyos +touch /var/log/vyos/vyos.log +echo "" > /var/log/vyos/vyos_monitor +sudo chown vyos:users -R /var/log/vyos /usr/share/vyos /usr/share/vyos-pbr +cp /usr/share/vyos-pbr/dhclient-script /sbin/dhclient-script +sudo cp /usr/share/vyos/config_server/vyos-log /etc/logrotate.d/vyos-log diff --git a/gbpservice/nfp/base_configurator/api/v1/app.py b/gbpservice/nfp/service_vendor_agents/vyos/agent/bin/vyos old mode 100644 new mode 100755 similarity index 74% rename from gbpservice/nfp/base_configurator/api/v1/app.py rename to gbpservice/nfp/service_vendor_agents/vyos/agent/bin/vyos index f65e34406b..9f523832ef --- a/gbpservice/nfp/base_configurator/api/v1/app.py +++ b/gbpservice/nfp/service_vendor_agents/vyos/agent/bin/vyos @@ -1,3 +1,5 @@ +#!/usr/bin/env python + # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at @@ -10,15 +12,9 @@ # License for the specific language governing permissions and limitations # under the License. 
-import pecan - - -def setup_app(config): +import sys - app_conf = dict(config.app) +sys.path.append("/usr/share/vyos") - return pecan.make_app( - app_conf.pop('root'), - logging=getattr(config, 'logging', {}), - **app_conf - ) +from config_server.server import main +main() diff --git a/gbpservice/nfp/service_vendor_agents/vyos/agent/etc/init.d/vyos b/gbpservice/nfp/service_vendor_agents/vyos/agent/etc/init.d/vyos new file mode 100755 index 0000000000..d36f31861f --- /dev/null +++ b/gbpservice/nfp/service_vendor_agents/vyos/agent/etc/init.d/vyos @@ -0,0 +1,139 @@ +#! /bin/sh +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +### BEGIN INIT INFO +# Provides: vyos +# Required-Start: $remote_fs $syslog +# Required-Stop: $remote_fs $syslog +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: Vyos agent service +# Description: Provides the vyos agent service +### END INIT INFO +set -e +PIDFILE=/var/run/vyos/vyos.pid +LOGFILE=/var/log/vyos/vyos.log +DAEMON=/usr/bin/vyos +DAEMON_ARGS="--log-file=$LOGFILE" +DAEMON_DIR=/var/run/vyos +ENABLED=true + +if test -f /etc/default/vyos; then + . /etc/default/vyos +fi + +mkdir -p /var/run/vyos +mkdir -p /var/log/vyos +. /lib/lsb/init-functions +export PATH="${PATH:+$PATH:}/usr/sbin:/sbin" +export TMPDIR=/var/lib/vyos/tmp + +if [ ! 
-x ${DAEMON} ] ; then + exit 0 +fi + +case "$1" in +start) +test "$ENABLED" = "true" || exit 0 +start=1 +## check if pidfile is there +if [ -f $PIDFILE ]; then + pid=`cat $PIDFILE` + ## check if pid is there + if [ "1$pid" -ne "1" ]; then + ## check if process with pid not running + set +e + kill -0 $pid > /dev/null 2>&1 + [ $? -eq 0 ] && start=0 + set -e + fi +fi + +if [ $start -eq 1 ]; then + ## ensure stale processes killed + set +e + running_processes=`ps aux | grep "python /usr/bin/vyos" | grep -v grep | awk '{print $2}' | wc -l` + [ $running_processes -gt 0 ] && ps aux | grep "python /usr/bin/vyos" | grep -v grep | awk '{print $2}' | xargs kill -9 + set -e + log_daemon_msg "Starting Vyos Agent" + # We have completely messed up the rc level scripts + sudo chown vyos:users -R /var/run/vyos + sudo -u vyos start-stop-daemon -Sbmv --pidfile $PIDFILE --chdir $DAEMON_DIR --exec $DAEMON -- $DAEMON_ARGS + log_end_msg $? +else + echo "vyos[$pid] is already running" +fi + +;; +stop) +test "$ENABLED" = "true" || exit 0 + +if [ -f $PIDFILE ]; then + set +e + kill -0 `cat $PIDFILE` > /dev/null 2>&1 + if [ $? -eq 0 ]; then + set -e + log_daemon_msg "Stopping Vyos Agent" + start-stop-daemon --stop --oknodo --pidfile ${PIDFILE} + log_end_msg $? + else + echo "No process with PID `cat $PIDFILE` found running, removing the PID file" + fi + rm $PIDFILE +else + echo "PID file not existing" +fi + +## ensure stale processes killed +set +e +running_processes=`ps aux | grep "python /usr/bin/vyos" | grep -v grep | awk '{print $2}' | wc -l` +[ $running_processes -gt 0 ] && ps aux | grep "python /usr/bin/vyos" | grep -v grep | awk '{print $2}' | xargs kill -9 +set -e +;; +restart|force-reload) +test "$ENABLED" = "true" || exit 1 +$0 stop +sleep 2 +$0 start +;; +reload) +test "$ENABLED" = "true" || exit 0 + +## check if pidfile is there +if [ -f $PIDFILE ]; then + set +e + kill -0 `cat $PIDFILE` > /dev/null 2>&1 + if [ $? 
-eq 0 ]; then + set -e + log_daemon_msg "Reloading vyos agent" + start-stop-daemon --stop --signal 1 --quiet --oknodo --pidfile $PIDFILE + log_end_msg $? + else + echo "No process with PID `cat $PIDFILE` found running, removing the PID file" + fi +else + echo "Vyos agent is not running or PID file not existing" +fi + +;; +status) +test "$ENABLED" = "true" || exit 0 +status_of_proc -p $PIDFILE $DAEMON vyos && exit 0 || exit $? +;; +*) +log_action_msg "Usage: /etc/init.d/vyos {start|stop|restart|force-reload|reload|status}" +exit 1 +;; +esac +exit 0 diff --git a/gbpservice/nfp/service_vendor_agents/vyos/__init__.py b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/__init__.py similarity index 100% rename from gbpservice/nfp/service_vendor_agents/vyos/__init__.py rename to gbpservice/nfp/service_vendor_agents/vyos/agent/src/__init__.py diff --git a/gbpservice/nfp/service_vendor_agents/vyos/auth_pam.pl b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/auth_pam.pl similarity index 92% rename from gbpservice/nfp/service_vendor_agents/vyos/auth_pam.pl rename to gbpservice/nfp/service_vendor_agents/vyos/agent/src/auth_pam.pl index 3cf31fa4cb..face3217bf 100755 --- a/gbpservice/nfp/service_vendor_agents/vyos/auth_pam.pl +++ b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/auth_pam.pl @@ -1,5 +1,17 @@ #!/usr/bin/perl +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ use Data::Dumper; local $Data::Dumper::Terse =1; use JSON; @@ -81,7 +93,7 @@ sub read_auth_server_conf { # Get auth server conf from file - my $AUTH_SERVER_CONF_FILE = "/usr/share/vyos-oc/auth_server.conf"; + my $AUTH_SERVER_CONF_FILE = "/usr/share/vyos/auth_server.conf"; if (!open (AUTHFILE, $AUTH_SERVER_CONF_FILE)) { print "Could not open auth file : $AUTH_SERVER_CONF_FILE\n"; @@ -101,7 +113,6 @@ sub read_auth_server_conf { chomp $REMOTE_VPN_ROLE_NAME; chomp $SERVICE_PROJECT_ID; - #$DB::single = 1; close(AUTHFILE); } @@ -139,16 +150,13 @@ sub read_username_passwd { sub get_cloud_admin_token { - #$DB::single = 1; my $http_req = HTTP::Request->new(POST => $url_get_admin_token); $http_req->header('content-type' => 'application/json'); $get_admin_token_data->{"auth"}{"identity"}{"password"}{"user"}{"name"} = $cloud_admin_username; $get_admin_token_data->{"auth"}{"identity"}{"password"}{"user"}{"password"} = $cloud_admin_password; - #$get_admin_token_data->{"auth"}{"scope"}{"project"}{"name"} = $cloud_admin_projname; $json_string = to_json($get_admin_token_data); $http_req->content($json_string); - #$http_req->content($get_admin_token_data); my $http_resp = $httpclient->request($http_req); if ($http_resp->is_success) { my $message = $http_resp->decoded_content; @@ -165,7 +173,6 @@ sub get_cloud_admin_token { sub get_domain_id { my $http_req = HTTP::Request->new(GET => $url_get_domain); - #$DB::single = 1; $http_req->header('content-type' => 'application/json'); $http_req->header('x-auth-token' => $admin_token_id); @@ -187,7 +194,6 @@ sub get_domain_id { sub get_role_id { my $http_req = HTTP::Request->new(GET => $url_get_role_id); - #$DB::single = 1; $http_req->header('content-type' => 'application/json'); $http_req->header('x-auth-token' => $admin_token_id); @@ -222,7 +228,6 @@ sub user_authenticate { if ($http_resp->is_success) { my $message = $http_resp->decoded_content; - #$DB::single = 1; my $decoded_resp = decode_json($message); $user_token_id = 
$http_resp->headers->{'x-subject-token'}; $user_id = $decoded_resp->{'token'}->{'user'}->{'id'}; @@ -239,7 +244,6 @@ sub user_authenticate { sub get_user_roles { $url_get_role_assignment = $KEYSTONE_AUTH_URL . "/v3/role_assignments?user.id=$user_id&role.id=$user_role_id"; my $http_req = HTTP::Request->new(GET => $url_get_role_assignment); - #$DB::single = 1; $http_req->header('content-type' => 'application/json'); $http_req->header('x-auth-token' => $admin_token_id); @@ -247,7 +251,6 @@ sub get_user_roles { if ($http_resp->is_success) { my $message = $http_resp->decoded_content; my $decoded_resp = decode_json($message); - #$DB::single = 1; my $user_roles = $decoded_resp->{'role_assignments'}; my $len = @{$user_roles}; if ($len) { diff --git a/gbpservice/nfp/service_vendor_agents/vyos/auth_pam_domain_verify.pl b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/auth_pam_domain_verify.pl similarity index 92% rename from gbpservice/nfp/service_vendor_agents/vyos/auth_pam_domain_verify.pl rename to gbpservice/nfp/service_vendor_agents/vyos/agent/src/auth_pam_domain_verify.pl index d5d32c58f7..2729c34ee3 100755 --- a/gbpservice/nfp/service_vendor_agents/vyos/auth_pam_domain_verify.pl +++ b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/auth_pam_domain_verify.pl @@ -1,5 +1,17 @@ #!/usr/bin/perl +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ use Data::Dumper; local $Data::Dumper::Terse =1; use JSON; @@ -73,7 +85,7 @@ sub read_auth_server_conf { # Get auth server conf from file - my $AUTH_SERVER_CONF_FILE = "/usr/share/vyos-oc/auth_server.conf"; + my $AUTH_SERVER_CONF_FILE = "/usr/share/vyos/auth_server.conf"; if (!open (AUTHFILE, $AUTH_SERVER_CONF_FILE)) { print "Could not open auth file : $AUTH_SERVER_CONF_FILE\n"; @@ -93,7 +105,6 @@ sub read_auth_server_conf { chomp $REMOTE_VPN_ROLE_NAME; chomp $PROJECT_ID; - #$DB::single = 1; close(AUTHFILE); } @@ -131,16 +142,13 @@ sub read_username_passwd { sub get_cloud_admin_token { - #$DB::single = 1; my $http_req = HTTP::Request->new(POST => $url_get_admin_token); $http_req->header('content-type' => 'application/json'); $get_admin_token_data->{"auth"}{"identity"}{"password"}{"user"}{"name"} = $cloud_admin_username; $get_admin_token_data->{"auth"}{"identity"}{"password"}{"user"}{"password"} = $cloud_admin_password; - #$get_admin_token_data->{"auth"}{"scope"}{"project"}{"name"} = $cloud_admin_projname; $json_string = to_json($get_admin_token_data); $http_req->content($json_string); - #$http_req->content($get_admin_token_data); my $http_resp = $httpclient->request($http_req); if ($http_resp->is_success) { my $message = $http_resp->decoded_content; @@ -157,7 +165,6 @@ sub get_cloud_admin_token { sub get_domain_id { my $http_req = HTTP::Request->new(GET => $url_get_domain); - #$DB::single = 1; $http_req->header('content-type' => 'application/json'); $http_req->header('x-auth-token' => $admin_token_id); @@ -177,7 +184,6 @@ sub get_domain_id { sub get_role_id { my $http_req = HTTP::Request->new(GET => $url_get_role_id); - #$DB::single = 1; $http_req->header('content-type' => 'application/json'); $http_req->header('x-auth-token' => $admin_token_id); @@ -210,7 +216,6 @@ sub user_authenticate { if ($http_resp->is_success) { my $message = $http_resp->decoded_content; - #$DB::single = 1; my $decoded_resp = decode_json($message); $user_token_id = 
$http_resp->headers->{'x-subject-token'}; $user_id = $decoded_resp->{'token'}->{'user'}->{'id'}; @@ -227,7 +232,6 @@ sub user_authenticate { sub get_user_roles { $url_get_role_assignment = $KEYSTONE_AUTH_URL . "/v3/role_assignments?user.id=$user_id&role.id=$user_role_id"; my $http_req = HTTP::Request->new(GET => $url_get_role_assignment); - #$DB::single = 1; $http_req->header('content-type' => 'application/json'); $http_req->header('x-auth-token' => $admin_token_id); @@ -235,7 +239,6 @@ sub get_user_roles { if ($http_resp->is_success) { my $message = $http_resp->decoded_content; my $decoded_resp = decode_json($message); - #$DB::single = 1; my $user_roles = $decoded_resp->{'role_assignments'}; my $len = @{$user_roles}; if ($len) { diff --git a/gbpservice/nfp/service_vendor_agents/vyos/agent/src/auth_server.conf b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/auth_server.conf new file mode 100755 index 0000000000..c78bdbcb15 --- /dev/null +++ b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/auth_server.conf @@ -0,0 +1,18 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +http://10.30.120.97:5000/ +services +neutron +noir0123 +vpn +45fe9bb731054eb4acdae8e15d48a562 diff --git a/gbpservice/nfp/service_vendor_agents/vyos/oc_config_server/__init__.py b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/config_server/__init__.py similarity index 100% rename from gbpservice/nfp/service_vendor_agents/vyos/oc_config_server/__init__.py rename to gbpservice/nfp/service_vendor_agents/vyos/agent/src/config_server/__init__.py diff --git a/gbpservice/nfp/service_vendor_agents/vyos/oc_config_server/edit_persistent_rule.py b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/config_server/edit_persistent_rule.py similarity index 83% rename from gbpservice/nfp/service_vendor_agents/vyos/oc_config_server/edit_persistent_rule.py rename to gbpservice/nfp/service_vendor_agents/vyos/agent/src/config_server/edit_persistent_rule.py index db45c0e8ea..1f5a46fae6 100644 --- a/gbpservice/nfp/service_vendor_agents/vyos/oc_config_server/edit_persistent_rule.py +++ b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/config_server/edit_persistent_rule.py @@ -1,7 +1,19 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import logging from subprocess import call + import netifaces -import logging -from vyos_dhc import initiate_dhclient from vyos_session import utils logger = logging.getLogger(__name__) @@ -12,14 +24,13 @@ class EditPersistentRule(object): + def __init__(self): pass def add(self, mac_info): provider_rule, stitching_rule, interface_list = self.get_rule(mac_info) self.clean_stale_rules(interface_list) - # line = ADD_RULE % (mac, interface) - # initiate_dhclient() self.delete(mac_info) try: call("sudo chown vyos: " @@ -78,11 +89,6 @@ def clean_stale_rules(self, interface_list): cmd = 'sudo sed -i /%s/d %s' % ( interface, INTERFACE_RULE_FILE) call(cmd.split()) - except Exception, err: + except Exception as err: logger.error("ERROR deleting stale persistent rule. Interfaces: " "%r . Details: %r" % (interface_list, str(err))) - - - - - diff --git a/gbpservice/nfp/service_vendor_agents/vyos/agent/src/config_server/fw_constants.py b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/config_server/fw_constants.py new file mode 100644 index 0000000000..8aad936840 --- /dev/null +++ b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/config_server/fw_constants.py @@ -0,0 +1,15 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +ACTIONS = ["drop", "reject", "accept", "inspect"] +STATE = ["established", "invalid", "related"] +AVAILABILITY = ["enable", "disable"] diff --git a/gbpservice/nfp/service_vendor_agents/vyos/oc_config_server/oc_fw_module.py b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/config_server/fw_module.py similarity index 72% rename from gbpservice/nfp/service_vendor_agents/vyos/oc_config_server/oc_fw_module.py rename to gbpservice/nfp/service_vendor_agents/vyos/agent/src/config_server/fw_module.py index b2963430a0..95b3da56cf 100644 --- a/gbpservice/nfp/service_vendor_agents/vyos/oc_config_server/oc_fw_module.py +++ b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/config_server/fw_module.py @@ -1,50 +1,51 @@ -# One Convergence, Inc. CONFIDENTIAL -# Copyright (c) 2012-2015, One Convergence, Inc., USA -# All Rights Reserved. +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at # -# All information contained herein is, and remains the property of -# One Convergence, Inc. and its suppliers, if any. The intellectual and -# technical concepts contained herein are proprietary to One Convergence, -# Inc. and its suppliers. +# http://www.apache.org/licenses/LICENSE-2.0 # -# Dissemination of this information or reproduction of this material is -# strictly forbidden unless prior written permission is obtained from -# One Convergence, Inc., USA +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
-#!/usr/bin/env python +import ast +import json as jsonutils import logging -import json import netifaces import time -import fw_constants -import ast -from operations import configOpts -from vyos_session import utils -from netifaces import AF_INET, AF_INET6, AF_LINK, AF_PACKET, AF_BRIDGE + from execformat.executor import session +from netifaces import AF_LINK +from operations import ConfigOpts +from vyos_session import utils -FWN = 'firewall name' -# oc_fw_identifier = 'oc_fw' +FW_NAME = 'firewall name' rule = 'rule' firewall_rules = { - 'protocol': '%s protocol %s', - 'source_ip_address': '%s source address %s', - 'destination_ip_address': '%s destination address %s', - 'source_port': '%s source port %s', - 'destination_port': '%s destination port %s' - } + 'protocol': '%s protocol %s', + 'source_ip_address': '%s source address %s', + 'destination_ip_address': '%s destination address %s', + 'source_port': '%s source port %s', + 'destination_port': '%s destination port %s' +} firewall_action = {'allow': 'accept', 'deny': 'drop'} logger = logging.getLogger(__name__) utils.init_logger(logger) +''' Firewall module of VyOS agent. -class OCFWConfigClass(configOpts): +''' + + +class VyosFWConfig(ConfigOpts): def __init__(self): - super(OCFWConfigClass, self).__init__() - self.oc_fw_identifier = 'oc_fw' + super(VyosFWConfig, self).__init__() + self.fw_identifier = 'fw' self.provider_ptg_interfaces = list() self.rules = list() @@ -68,7 +69,7 @@ def set_up_rule_on_interfaces(self, firewall): """ sorted_rule_list, self.provider_ptg_interfaces = list(), list() - firewall = json.loads(firewall) + firewall = jsonutils.loads(firewall) fw_rule_list = firewall['firewall_rule_list'] logger.info("Initiating firewall - %s build. of Tenant: %s" % ( firewall['id'], firewall['tenant_id'])) @@ -94,7 +95,7 @@ def set_up_rule_on_interfaces(self, firewall): # before on the interface. Need to evaluate side effect of this method. 
try: self._ensure_clean_interface() - except: + except Exception: pass self.rules = list() self.add_common_rule() @@ -123,12 +124,11 @@ def set_up_rule_on_interfaces(self, firewall): session.teardown_config_session() def add_common_rule(self): - self.oc_fw_identifier = ('oc_fw' + '_' + - self.provider_ptg_interfaces[0]) - default_action = (FWN + ' ' + self.oc_fw_identifier + + self.fw_identifier = ('fw' + '_' + self.provider_ptg_interfaces[0]) + default_action = (FW_NAME + ' ' + self.fw_identifier + ' default-action drop' ) - common_fw_rule_prefix = (FWN + ' ' + self.oc_fw_identifier + ' ' + + common_fw_rule_prefix = (FW_NAME + ' ' + self.fw_identifier + ' ' + rule + ' 10') accept_action = (common_fw_rule_prefix + ' action accept') established_action = (common_fw_rule_prefix + @@ -145,7 +145,7 @@ def create_vyos_fw_rule(self, fw_rule): position = str(int(fw_rule.get('position', '100')) + 10) if position < 1: position *= 10 - common_fw_rule_prefix = (FWN + ' ' + self.oc_fw_identifier + ' ' + + common_fw_rule_prefix = (FW_NAME + ' ' + self.fw_identifier + ' ' + rule + ' ' + position) self.rules.append(common_fw_rule_prefix) self.rules.append(''.join([common_fw_rule_prefix, ' action %s' % @@ -155,8 +155,8 @@ def create_vyos_fw_rule(self, fw_rule): self.rules.extend( [firewall_rules[k] % (common_fw_rule_prefix, fw_rule[k] - if k not in ['source_port', 'destination_port'] - else fw_rule[k].replace(':', '-')) + if k not in ['source_port', 'destination_port'] + else fw_rule[k].replace(':', '-')) for k, v in fw_rule.iteritems() if fw_rule[k] and k in firewall_rules] ) @@ -167,23 +167,16 @@ def create_vyos_fw_rule(self, fw_rule): raise Exception(err) def configure_interfaces(self): - if fw_constants.intercloud: - # TODO(Vikash) Its not always the bridge will have same name every - # time. Its only for intercloud - interface_conf = ("interfaces bridge br0 firewall in name " + - self.oc_fw_identifier) + # It would be always 1 for now. 
+ for interface in self.provider_ptg_interfaces: + if interface.lower() == 'lo': + continue + interface_conf = ('interfaces ethernet ' + interface + ' ' + + 'firewall out name ' + self.fw_identifier) self.rules += [interface_conf] - else: - # It would be always 1 for now. - for interface in self.provider_ptg_interfaces: - if interface.lower() == 'lo': - continue - interface_conf = ('interfaces ethernet ' + interface + ' ' + - 'firewall out name ' + self.oc_fw_identifier) - self.rules += [interface_conf] def reset_firewall(self, firewall): - fw_data = json.loads(firewall) + fw_data = jsonutils.loads(firewall) try: self.set_provider_interface(fw_data) except Exception as err: @@ -201,29 +194,19 @@ def reset_firewall(self, firewall): session.setup_config_session() - if fw_constants.intercloud: - bridge_rule = ("interfaces bridge br0 firewall in name " + - self.oc_fw_identifier) - try: - self.delete(bridge_rule.split()) - except Exception as err: - msg = (" Rule deletion on bridge failed - %s " % str( - err)) - logger.error(msg) - raise Exception(msg, 400, dict(delete_success=False)) - else: - del_interface_rule = ( - 'interfaces ethernet ' + self.provider_ptg_interfaces[0] + - ' ' + 'firewall') - try: - self.delete(del_interface_rule.split()) - except Exception as err: - session.discard() - session.teardown_config_session() - msg = ("Rule deletion on interface %s failed. ERROR: %s " % - (self.provider_ptg_interfaces[0], str(err))) - logger.error(msg) - raise Exception(msg, 400, dict(delete_success=False)) + del_interface_rule = ( + 'interfaces ethernet ' + self.provider_ptg_interfaces[0] + + ' ' + 'firewall') + try: + self.delete(del_interface_rule.split()) + except Exception as err: + session.discard() + session.teardown_config_session() + msg = ("Rule deletion on interface %s failed. 
ERROR: %s " % + (self.provider_ptg_interfaces[0], str(err))) + logger.error(msg) + raise Exception(msg, 400, dict(delete_success=False)) + try: session.commit() except Exception as err: @@ -236,9 +219,8 @@ def reset_firewall(self, firewall): # sleep for 2 sec. Got removed in last merge. time.sleep(2) - self.oc_fw_identifier = ('oc_fw' + '_' + - self.provider_ptg_interfaces[0]) - del_firewall = FWN + ' ' + self.oc_fw_identifier + self.fw_identifier = ('fw' + '_' + self.provider_ptg_interfaces[0]) + del_firewall = FW_NAME + ' ' + self.fw_identifier try: self.delete(del_firewall.split()) except Exception as err: @@ -298,8 +280,7 @@ def set_provider_interface(self, firewall): for interface in interfaces: # IPV4 support only # (Fixme) what in the case of aliasing? - # ip = netifaces.ifaddresses(interface)[AF_INET][0]['addr'] - # TODO (Vikash) Not reqd for L2 , need to revisit for L3 + # TODO(Vikash) Not reqd for L2 , need to revisit for L3 # vpn tunnel interface for ssl vpn does not have a mac address physical_interface = netifaces.ifaddresses(interface).get(AF_LINK) if not physical_interface: @@ -326,9 +307,8 @@ def _ensure_clean_interface(self): del_interface_rule = ( 'interfaces ethernet ' + self.provider_ptg_interfaces[0] + ' ' + 'firewall') - self.oc_fw_identifier = ('oc_fw' + '_' + - self.provider_ptg_interfaces[0]) - del_firewall = FWN + ' ' + self.oc_fw_identifier + self.fw_identifier = ('fw' + '_' + self.provider_ptg_interfaces[0]) + del_firewall = FW_NAME + ' ' + self.fw_identifier try: self.delete(del_interface_rule.split()) # delete firewall @@ -346,12 +326,11 @@ def run_sshd_on_mgmt_ip(self, mgmt_ip): self.set(command.split()) try: session.commit() - except: - logger.error("Failed to update sshd listen-address to %s" % - mgmt_ip) + except Exception as err: + logger.error("Failed to update sshd listen-address " + "to %s. 
Reason: %r" % (mgmt_ip, err)) session.discard() session.teardown_config_session() return session.save() session.teardown_config_session() - diff --git a/gbpservice/nfp/service_vendor_agents/vyos/agent/src/config_server/interface_monitor.sh b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/config_server/interface_monitor.sh new file mode 100755 index 0000000000..5d61513469 --- /dev/null +++ b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/config_server/interface_monitor.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + function enumerate_net_interfaces { + + echo `date` `ip addr` >> /var/log/oc/vyos_monitor + echo "\n" + echo `date` `sudo netstat -pantl | grep 8888` >>/var/log/oc/vyos_monitor + } + + enumerate_net_interfaces + diff --git a/gbpservice/nfp/service_vendor_agents/vyos/agent/src/config_server/log_forwarder.py b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/config_server/log_forwarder.py new file mode 100644 index 0000000000..04f5b87ce2 --- /dev/null +++ b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/config_server/log_forwarder.py @@ -0,0 +1,56 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import logging +import subprocess + +from vyos_session import utils + +SUCCESS = True +FAILED = False + +logger = logging.getLogger(__name__) +utils.init_logger(logger) + + +class APIHandler(object): + + def __init__(self): + pass + + def run_command(self, command): + proc = subprocess.Popen(command, + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + + out, err = proc.communicate() + if err: + logger.error("Unable to run command %s, ERROR- %s" % + (command, err)) + return None + return out + + def configure_rsyslog_as_client(self, config): + command = ("/opt/vyatta/sbin/vyatta-cfg-cmd-wrapper begin " + "/opt/vyatta/sbin/vyatta-cfg-cmd-wrapper set system " + "syslog host %s facility all level %s" + "/opt/vyatta/sbin/vyatta-cfg-cmd-wrapper commit" + "/opt/vyatta/sbin/vyatta-cfg-cmd-wrapper save" % ( + config['server_ip'], config['log_level'])) + + try: + self.run_command(command) + return SUCCESS + except Exception as ex: + logger.error("Error while configuring rsyslog as client. %s" % ex) + return FAILED diff --git a/gbpservice/nfp/service_vendor_agents/vyos/agent/src/config_server/operations.py b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/config_server/operations.py new file mode 100755 index 0000000000..324e7e26f2 --- /dev/null +++ b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/config_server/operations.py @@ -0,0 +1,65 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import logging +import os +import sys + +from execformat.executor import execUtils +from execformat.executor import OperationFailed +from vyos_session import utils + +topdir = os.path.dirname(os.path.realpath(__file__)) + "../.." +topdir = os.path.realpath(topdir) +sys.path.insert(0, topdir) + +logger = logging.getLogger(__name__) +utils.init_logger(logger) + + +class ConfigOpts(object): + + def __init__(self): + pass + + def set_full(self, args): + exe = execUtils(list(args)) + exe.execmd() + + def delete_full(self, args): + exe = execUtils(list(args)) + exe.execmd() + + def show(self, args): + exe = execUtils(list(args)) + res, output = exe.execmd(nonsession=True) + return res, output + + def set(self, args): + args.insert(0, 'set') + exe = execUtils(list(args)) + try: + exe.execmd() + return True + except OperationFailed as e: + logger.error(e.message) + return False + + def delete(self, args): + args.insert(0, 'delete') + exe = execUtils(list(args)) + try: + exe.execmd() + return True + except OperationFailed as e: + logger.error(e.message) + return False diff --git a/gbpservice/nfp/service_vendor_agents/vyos/oc_config_server/routes_config_handler.py b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/config_server/routes_config_handler.py similarity index 58% rename from gbpservice/nfp/service_vendor_agents/vyos/oc_config_server/routes_config_handler.py rename to gbpservice/nfp/service_vendor_agents/vyos/agent/src/config_server/routes_config_handler.py index 28155768fb..224d77ba66 100644 --- 
a/gbpservice/nfp/service_vendor_agents/vyos/oc_config_server/routes_config_handler.py +++ b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/config_server/routes_config_handler.py @@ -1,8 +1,20 @@ -import json +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import json as jsonutils import logging +import netaddr import netifaces import subprocess -import netaddr import time from vyos_session import utils @@ -14,45 +26,46 @@ class RoutesConfigHandler(object): + def __init__(self): super(RoutesConfigHandler, self).__init__() def add_source_route(self, routes_info): - routes_info = json.loads(routes_info) + routes_info = jsonutils.loads(routes_info) for route_info in routes_info: source_cidr = route_info['source_cidr'] gateway_ip = route_info['gateway_ip'] source_interface = self._get_if_name_by_cidr(source_cidr) try: - interface_number_string = source_interface.split("eth",1)[1] + interface_number_string = source_interface.split("eth", 1)[1] except IndexError: logger.error("Retrieved wrong interface %s for configuring " - "routes" %(source_interface)) + "routes" % (source_interface)) routing_table_number = ROUTING_TABLE_BASE + int( interface_number_string.split('v')[0]) - ip_rule_command = "ip rule add from %s table %s" %( + ip_rule_command = "ip rule add from %s table %s" % ( source_cidr, routing_table_number) out1 = subprocess.Popen(ip_rule_command, shell=True, stdout=subprocess.PIPE).stdout.read() - ip_rule_command = "ip rule add to %s table main" %(source_cidr) + 
ip_rule_command = "ip rule add to %s table main" % (source_cidr) out2 = subprocess.Popen(ip_rule_command, shell=True, stdout=subprocess.PIPE).stdout.read() - ip_route_command = "ip route add table %s default via %s" %( - routing_table_number, gateway_ip) + ip_route_command = "ip route add table %s default via %s" % ( + routing_table_number, gateway_ip) out3 = self._add_default_route_in_table(ip_route_command, routing_table_number) - output = "%s\n%s\n%s" %(out1, out2, out3) - logger.info("Static route configuration result: %s" %(output)) - return json.dumps(dict(status=True)) + output = "%s\n%s\n%s" % (out1, out2, out3) + logger.info("Static route configuration result: %s" % (output)) + return jsonutils.dumps(dict(status=True)) def _del_default_route_in_table(self, table): - route_del_command = "ip route del table %s default" %(table) + route_del_command = "ip route del table %s default" % (table) command_pipe = subprocess.Popen(route_del_command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = command_pipe.communicate() if command_pipe.returncode != 0: - logger.error("Deleting default route failed: %s" %(err)) + logger.error("Deleting default route failed: %s" % (err)) def _add_default_route_in_table(self, route_cmd, table): command_pipe = subprocess.Popen(route_cmd, shell=True, @@ -64,8 +77,8 @@ def _add_default_route_in_table(self, route_cmd, table): if "File exists" in err: self._del_default_route_in_table(table) else: - logger.error("Adding default route failed: %s" %(route_cmd)) - logger.error("Error: %s" %(err)) + logger.error("Adding default route failed: %s" % (route_cmd)) + logger.error("Error: %s" % (err)) raise Exception("Setting Default Table route failed") else: return out @@ -75,8 +88,8 @@ def _add_default_route_in_table(self, route_cmd, table): stderr=subprocess.PIPE) out, err = command_pipe.communicate() if command_pipe.returncode != 0: - logger.error("Adding default route failed: %s" %(route_cmd)) - logger.error("Error: %s" 
%(err)) + logger.error("Adding default route failed: %s" % (route_cmd)) + logger.error("Error: %s" % (err)) raise Exception("Setting Default Table route failed") else: return out @@ -84,7 +97,7 @@ def _add_default_route_in_table(self, route_cmd, table): def _delete_ip_rule(self, cidr): count = 0 for direction in ["from", "to"]: - ip_rule_cmd = "ip rule del %s %s" %(direction, cidr) + ip_rule_cmd = "ip rule del %s %s" % (direction, cidr) while True: command_pipe = subprocess.Popen(ip_rule_cmd, shell=True, stdout=subprocess.PIPE, @@ -98,86 +111,30 @@ def _delete_ip_rule(self, cidr): if count >= 10: logger.error("Deleting policy based routing for CIDR: " "%s not completed even after 10 attempts" - %(cidr)) - break - - def _del_default_route_in_table(self, table): - route_del_command = "ip route del table %s default" %(table) - command_pipe = subprocess.Popen(route_del_command, shell=True, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - out, err = command_pipe.communicate() - if command_pipe.returncode != 0: - logger.error("Deleting default route failed: %s" %(err)) - - def _add_default_route_in_table(self, route_cmd, table): - command_pipe = subprocess.Popen(route_cmd, shell=True, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - out, err = command_pipe.communicate() - # Delete the existing default route if any and retry - if command_pipe.returncode != 0: - if "File exists" in err: - self._del_default_route_in_table(table) - else: - logger.error("Adding default route failed: %s" %(route_cmd)) - logger.error("Error: %s" %(err)) - raise Exception("Setting Default Table route failed") - else: - return out - - command_pipe = subprocess.Popen(route_cmd, shell=True, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - out, err = command_pipe.communicate() - if command_pipe.returncode != 0: - logger.error("Adding default route failed: %s" %(route_cmd)) - logger.error("Error: %s" %(err)) - raise Exception("Setting Default Table route failed") - else: - return out - 
- def _delete_ip_rule(self, cidr): - count = 0 - for direction in ["from", "to"]: - ip_rule_cmd = "ip rule del %s %s" %(direction, cidr) - while True: - command_pipe = subprocess.Popen(ip_rule_cmd, shell=True, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - out, err = command_pipe.communicate() - # Delete the existing default route if any and retry - if command_pipe.returncode != 0 and "No such file" in err: - break - else: - count = count + 1 - if count >= 10: - logger.error("Deleting policy based routing for CIDR: " - "%s not completed even after 10 attempts" - %(cidr)) + % (cidr)) break # REVISIT(Magesh): There may be a chance that there are duplicate rules # May have to do a list and cleanup multiple entries def delete_source_route(self, routes_info): - routes_info = json.loads(routes_info) + routes_info = jsonutils.loads(routes_info) for route_info in routes_info: source_cidr = route_info['source_cidr'] source_interface = self._get_if_name_by_cidr(source_cidr) try: - interface_number_string = source_interface.split("eth",1)[1] + interface_number_string = source_interface.split("eth", 1)[1] except IndexError: logger.error("Retrieved wrong interface %s for deleting routes" - %(source_interface)) + % (source_interface)) routing_table_number = ROUTING_TABLE_BASE + int( - interface_number_string.split('v')[0]) + interface_number_string.split('v')[0]) self._delete_ip_rule(source_cidr) - ip_route_command = "ip route del table %s default" %( + ip_route_command = "ip route del table %s default" % ( routing_table_number) out = subprocess.Popen(ip_route_command, shell=True, - stdout=subprocess.PIPE).stdout.read() - logger.info("Static route delete result: %s" %(out)) - return json.dumps(dict(status=True)) + stdout=subprocess.PIPE).stdout.read() + logger.info("Static route delete result: %s" % (out)) + return jsonutils.dumps(dict(status=True)) def _get_if_name_by_cidr(self, cidr): interfaces = netifaces.interfaces() @@ -193,11 +150,12 @@ def 
_get_if_name_by_cidr(self, cidr): netmask = inet_info.get('netmask') ip_address = inet_info.get('addr') subnet_prefix = cidr.split("/") - if (ip_address == subnet_prefix[0] and - (len(subnet_prefix) == 1 or subnet_prefix[1] == "32")): + if (ip_address == subnet_prefix[0] and ( + len(subnet_prefix) == 1 or subnet_prefix[ + 1] == "32")): return interface - ip_address_netmask = '%s/%s' %(ip_address, netmask) - interface_cidr = netaddr.IPNetwork(ip_address_netmask) + ip_address_netmask = '%s/%s' % (ip_address, netmask) + interface_cidr = netaddr.IPNetwork(ip_address_netmask) if str(interface_cidr.cidr) == cidr: return interface # Sometimes the hotplugged interface takes time to get IP diff --git a/gbpservice/nfp/service_vendor_agents/vyos/oc_config_server/server2.py b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/config_server/server.py similarity index 54% rename from gbpservice/nfp/service_vendor_agents/vyos/oc_config_server/server2.py rename to gbpservice/nfp/service_vendor_agents/vyos/agent/src/config_server/server.py index 53b0fdeff5..0aee5509b9 100644 --- a/gbpservice/nfp/service_vendor_agents/vyos/oc_config_server/server2.py +++ b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/config_server/server.py @@ -1,50 +1,47 @@ -# One Convergence, Inc. CONFIDENTIAL -# Copyright (c) 2012-2015, One Convergence, Inc., USA -# All Rights Reserved. +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at # -# All information contained herein is, and remains the property of -# One Convergence, Inc. and its suppliers, if any. The intellectual and -# technical concepts contained herein are proprietary to One Convergence, -# Inc. and its suppliers. 
+# http://www.apache.org/licenses/LICENSE-2.0 # -# Dissemination of this information or reproduction of this material is -# strictly forbidden unless prior written permission is obtained from -# One Convergence, Inc., USA +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. -import sys +import ast +import json as jsonutils +import logging +import netifaces import os -import json import signal -import logging -import ast +import sys import time -from os.path import abspath, dirname -import netifaces +from os.path import abspath +from os.path import dirname -sys.path.insert(0, dirname(dirname(abspath(__file__)))) -from vyos_session.utils import init_logger -from oc_fw_module import OCFWConfigClass from edit_persistent_rule import EditPersistentRule +from flask import Flask +from flask import jsonify +from flask import request +from fw_module import VyosFWConfig +from log_forwarder import APIHandler as apihandler from static_ip import StaticIp -from flask import Flask, request -from os.path import abspath, dirname from vpn_api_server import VPNHandler as vpnhandler +from vyos_exception import VyosException from vyos_policy_based_routes import RoutesConfigHandler as routes_handler -from ha_config import VYOSHAConfig -from vyos_exception import OCException -from flask import jsonify -from log_forwarder import APIHandler as apihandler -from stats_parser import APIHandler as stats_apihandler -# sys.path.insert(0, dirname(dirname(abspath(__file__)))) -# sys.path.insert(0, (abspath(__file__))) +from vyos_session.utils import init_logger + +sys.path.insert(0, dirname(dirname(abspath(__file__)))) logger = logging.getLogger(__name__) init_logger(logger) app = Flask(__name__) -oc_fw_module = None +fw_module = None 
e = EditPersistentRule() error_msgs = { @@ -54,8 +51,8 @@ @app.route('/auth-server-config', methods=['POST']) def auth_server_config(): - data = json.loads(request.data) - f = open("/usr/share/vyos-oc/auth_server.conf", 'w') + data = jsonutils.loads(request.data) + f = open("/usr/share/vyos/auth_server.conf", 'w') f.write(data['auth_uri']) f.write('\n') f.write(data['admin_tenant_name']) @@ -70,26 +67,25 @@ def auth_server_config(): f.write("\n") try: - host_ip = data['host_mapping'].split()[0]+"/32" - command = 'grep "new_routers" /var/lib/dhcp3/dhclient_eth0_lease |tail -1| cut -d: -d "=" -f2' + host_ip = data['host_mapping'].split()[0] + "/32" + command = ('grep "new_routers" /var/lib/dhcp3/dhclient_eth0_lease' + ' |tail -1| cut -d: -d "=" -f2') gateway_ip = os.popen(command).read().strip().strip("'") - status = vpnhandler().configure_static_route("set", host_ip, gateway_ip) + vpnhandler().configure_static_route("set", host_ip, gateway_ip) except Exception as ex: err = ("Error in adding rvpn route. 
Reason: %s" % ex) logger.error(err) - return json.dumps(dict(status=False, reason=err)) + return jsonutils.dumps(dict(status=False, reason=err)) try: if data['host_mapping'].split()[1]: os.system("sudo chown vyos:users /etc/hosts") os.system("sudo echo '\n%s' >> /etc/hosts" % data['host_mapping']) os.system("sudo chown root:root /etc/hosts") - #with open('/etc/hosts', 'a') as hosts: - # hosts.write(data['host_mapping']) except Exception as e: logger.error("Error in writing host mapping in /etc/hosts - %s" % e) - return json.dumps(dict(status=True)) + return jsonutils.dumps(dict(status=True)) @app.route('/create-ipsec-site-conn', methods=['POST']) @@ -100,13 +96,13 @@ def create_ipsec_site_conn(): "commit" the changes """ try: - data = json.loads(request.data) + data = jsonutils.loads(request.data) status = vpnhandler().create_ipsec_site_conn(data) - return json.dumps(dict(status=status)) + return jsonutils.dumps(dict(status=status)) except Exception as ex: err = "Error in configuring ipsec_site_conection. Reason: %s" % ex logger.error(err) - return json.dumps(dict(status=False, reason=err)) + return jsonutils.dumps(dict(status=False, reason=err)) @app.route('/create-ipsec-site-tunnel', methods=['POST']) @@ -117,16 +113,16 @@ def create_ipsec_site_tunnel(): "commit" the changes """ try: - tunnel = json.loads(request.data) + tunnel = jsonutils.loads(request.data) pcidrs = tunnel['peer_cidrs'] for pcidr in pcidrs: tunnel['peer_cidr'] = pcidr status = vpnhandler().create_ipsec_site_tunnel(tunnel) - return json.dumps(dict(status=status)) + return jsonutils.dumps(dict(status=status)) except Exception as ex: err = ("Error in configuring ipsec_site_tunnel. 
Reason: %s" % ex) logger.error(err) - return json.dumps(dict(status=False, reason=err)) + return jsonutils.dumps(dict(status=False, reason=err)) @app.route('/delete-ipsec-site-tunnel', methods=['DELETE']) @@ -142,11 +138,11 @@ def delete_ipsec_site_tunnel(): tunnel['local_cidr'] = local_cidr tunnel['peer_cidr'] = pcidr status = vpnhandler().delete_ipsec_site_tunnel(tunnel) - return json.dumps(dict(status=status)) + return jsonutils.dumps(dict(status=status)) except Exception as ex: err = ("Error in deleting ipsec_site_tunnel. Reason: %s" % ex) logger.error(err) - return json.dumps(dict(status=False, reason=err)) + return jsonutils.dumps(dict(status=False, reason=err)) @app.route('/delete-ipsec-site-conn', methods=['DELETE']) @@ -154,11 +150,11 @@ def delete_ipsec_site_conn(): try: peer_address = request.args.get('peer_address') status = vpnhandler().delete_ipsec_site_conn(peer_address) - return json.dumps(dict(status=status)) + return jsonutils.dumps(dict(status=status)) except Exception as ex: err = ("Error in deleting ipsec_site_connection. Reason: %s" % ex) logger.error(err) - return json.dumps(dict(status=False, reason=err)) + return jsonutils.dumps(dict(status=False, reason=err)) @app.route('/get-ipsec-site-tunnel-state', methods=['GET']) @@ -172,35 +168,35 @@ def get_ipsec_site_tunnel_state(): tunnel['local_cidr'] = lcidr tunnel['peer_cidr'] = pcidr status, state = vpnhandler().get_ipsec_site_tunnel_state(tunnel) - return json.dumps(dict(state=state)) + return jsonutils.dumps(dict(state=state)) except Exception as ex: err = ("Error in get_ipsec_site_tunnel_state. 
Reason: %s" % ex) logger.error(err) - return json.dumps(dict(status=False, reason=err)) + return jsonutils.dumps(dict(status=False, reason=err)) @app.route('/create-ssl-vpn-conn', methods=['POST']) def create_ssl_vpn_conn(): try: - data = json.loads(request.data) + data = jsonutils.loads(request.data) status = vpnhandler().create_ssl_vpn_conn(data) - return json.dumps(dict(status=status)) + return jsonutils.dumps(dict(status=status)) except Exception as ex: err = ("Error in create_ssl_vpn_connection. Reason: %s" % ex) logger.error(err) - return json.dumps(dict(status=False, reason=err)) + return jsonutils.dumps(dict(status=False, reason=err)) @app.route('/ssl-vpn-push-route', methods=['POST']) def ssl_vpn_push_route(): try: - data = json.loads(request.data) + data = jsonutils.loads(request.data) status = vpnhandler().ssl_vpn_push_route(data) - return json.dumps(dict(status=status)) + return jsonutils.dumps(dict(status=status)) except Exception as ex: err = ("Error in ssl_vpn_push_route. Reason: %s" % ex) logger.error(err) - return json.dumps(dict(status=False, reason=err)) + return jsonutils.dumps(dict(status=False, reason=err)) @app.route('/delete-ssl-vpn-conn', methods=['DELETE']) @@ -208,11 +204,11 @@ def delete_ssl_vpn_conn(): try: tunnel_name = request.args.get('tunnel') status = vpnhandler().delete_ssl_vpn_conn(tunnel_name) - return json.dumps(dict(status=status)) + return jsonutils.dumps(dict(status=status)) except Exception as ex: err = ("Error in delete_ssl_vpn_conn. Reason: %s" % ex) logger.error(err) - return json.dumps(dict(status=False, reason=err)) + return jsonutils.dumps(dict(status=False, reason=err)) @app.route('/delete-ssl-vpn-route', methods=['DELETE']) @@ -220,11 +216,11 @@ def delete_ssl_vpn_route(): try: route = request.args.get('route') status = vpnhandler().delete_ssl_vpn_route(route) - return json.dumps(dict(status=status)) + return jsonutils.dumps(dict(status=status)) except Exception as ex: err = ("Error in delete_ssl_vpn_route. 
Reason: %s" % ex) logger.error(err) - return json.dumps(dict(status=False, reason=err)) + return jsonutils.dumps(dict(status=False, reason=err)) @app.route('/get-ssl-vpn-conn-state', methods=['GET']) @@ -232,65 +228,65 @@ def get_ssl_vpn_conn_state(): try: tunnel_name = request.args.get('tunnel') status, state = vpnhandler().get_ssl_vpn_conn_state(tunnel_name) - return json.dumps(dict(status=status, state=state)) + return jsonutils.dumps(dict(status=status, state=state)) except Exception as ex: err = ("Error in get_ssl_vpn_conn_state. Reason: %s" % ex) logger.error(err) - return json.dumps(dict(status=False, reason=err)) + return jsonutils.dumps(dict(status=False, reason=err)) @app.route('/configure-firewall-rule', methods=['POST']) def configure_firewall_rule(): - global oc_fw_module + global fw_module firewall_data = request.data try: - response = oc_fw_module.set_up_rule_on_interfaces(firewall_data) + response = fw_module.set_up_rule_on_interfaces(firewall_data) except Exception as err: try: - return send_error_response(OCException(err[0], status_code=err[1], - payload=err[2])) + return send_error_response(VyosException( + err[0], status_code=err[1], payload=err[2])) except IndexError: return send_error_response( - OCException(str(err), status_code=500, - payload=dict(err=error_msgs['unexpected'] % ( - 'configuring', 'firewall')))) + VyosException(str(err), status_code=500, + payload=dict(err=error_msgs['unexpected'] % ( + 'configuring', 'firewall')))) else: return jsonify(**response) @app.route('/delete-firewall-rule', methods=['DELETE']) def delete_firewall_rule(): - global oc_fw_module + global fw_module try: - response = oc_fw_module.reset_firewall(request.data) + response = fw_module.reset_firewall(request.data) except Exception as err: try: - return send_error_response(OCException(err[0], status_code=err[1], - payload=err[2])) + return send_error_response(VyosException( + err[0], status_code=err[1], payload=err[2])) except IndexError: return 
send_error_response( - OCException(str(err), status_code=500, - payload=dict(err=error_msgs['unexpected'] % ( - 'deleting', 'firewall')))) + VyosException(str(err), status_code=500, + payload=dict(err=error_msgs['unexpected'] % ( + 'deleting', 'firewall')))) else: return jsonify(**response) @app.route('/update-firewall-rule', methods=['PUT']) def update_firewall_rule(): - global oc_fw_module + global fw_module try: - oc_fw_module.reset_firewall(request.data) - response = oc_fw_module.set_up_rule_on_interfaces(request.data) + fw_module.reset_firewall(request.data) + response = fw_module.set_up_rule_on_interfaces(request.data) except Exception as err: try: - return send_error_response(OCException(err[0], status_code=err[1], - payload=err[2])) + return send_error_response(VyosException( + err[0], status_code=err[1], payload=err[2])) except IndexError: return send_error_response( - OCException(str(err), status_code=500, - payload=dict(err=error_msgs['unexpected'] % ( - 'updating', 'firewall')))) + VyosException(str(err), status_code=500, + payload=dict(err=error_msgs['unexpected'] % ( + 'updating', 'firewall')))) else: return jsonify(**response) @@ -302,7 +298,7 @@ def add_source_route(): except Exception as ex: err = ("Exception in adding source route. %s" % ex) logger.error(err) - return json.dumps(dict(status=False, reason=err)) + return jsonutils.dumps(dict(status=False, reason=err)) @app.route('/delete-source-route', methods=['DELETE']) @@ -312,19 +308,20 @@ def delete_source_route(): except Exception as ex: err = ("Exception in deleting source route. 
%s" % ex) logger.error(err) - return json.dumps(dict(status=False, reason=err)) + return jsonutils.dumps(dict(status=False, reason=err)) @app.route('/add-stitching-route', methods=['POST']) def add_stitching_route(): try: - gateway_ip = json.loads(request.data).get('gateway_ip') - status = vpnhandler().configure_static_route("set", "0.0.0.0/0", gateway_ip) - return json.dumps(dict(status=status)) + gateway_ip = jsonutils.loads(request.data).get('gateway_ip') + status = vpnhandler().configure_static_route("set", "0.0.0.0/0", + gateway_ip) + return jsonutils.dumps(dict(status=status)) except Exception as ex: err = ("Error in add_stitching_route. Reason: %s" % ex) logger.error(err) - return json.dumps(dict(status=False, reason=err)) + return jsonutils.dumps(dict(status=False, reason=err)) @app.route('/delete-stitching-route', methods=['DELETE']) @@ -332,72 +329,14 @@ def delete_stitching_route(): try: gateway_ip = request.args.get('gateway_ip') status = vpnhandler().configure_static_route( - "delete", "0.0.0.0/0", gateway_ip) - return json.dumps(dict(status=status)) + "delete", "0.0.0.0/0", gateway_ip) + return jsonutils.dumps(dict(status=status)) except Exception as ex: err = ("Error in delete_stitching_route. Reason: %s" % ex) logger.error(err) - return json.dumps(dict(status=False, reason=err)) + return jsonutils.dumps(dict(status=False, reason=err)) -@app.route('/configure_conntrack_sync', methods=['POST']) -def configure_conntrack_sync(): - global vyos_ha_config - try: - response = vyos_ha_config.configure_conntrack_sync(request.data) - except Exception as err: - # This flask version has issue in implicit way of registering - # error handler. 
- try: - return send_error_response(OCException(err[0], status_code=err[1], - payload=err[2])) - except IndexError: - return send_error_response( - OCException(str(err), status_code=500, - payload=dict(err=error_msgs['unexpected'] % ( - 'configuring', 'conntrack sync')))) - else: - return jsonify(**response) - - -@app.route('/configure_interface_ha', methods=['POST']) -def configure_interface_ha(): - global vyos_ha_config - try: - response = vyos_ha_config.set_vrrp_for_interface(request.data) - except Exception as err: - try: - return send_error_response(OCException(err[0], status_code=err[1], - payload=err[2])) - except IndexError: - return send_error_response( - OCException(str(err), status_code=500, - payload=dict( - err=error_msgs['unexpected'] % ( - 'configuring', 'HA for the interface')))) - else: - return jsonify(**response) - - -@app.route('/delete_vrrp', methods=['DELETE']) -def delete_vrrp(): - global vyos_ha_config - try: - response = vyos_ha_config.delete_vrrp(request.data) - except Exception as err: - try: - return send_error_response(OCException(err[0], status_code=err[1], - payload=err[2])) - except IndexError: - return send_error_response( - OCException(str(err), status_code=500, - payload=dict(err=error_msgs['unexpected'] % ( - 'deleting', 'VRRP')))) - else: - return jsonify(**response) - - -# @app.errorhandler(OCException) def send_error_response(error): response = jsonify(error.to_dict()) response.status_code = error.status_code @@ -408,91 +347,70 @@ def send_error_response(error): def add_static_ip(): try: static_ip_obj = StaticIp() - data = json.loads(request.data) + data = jsonutils.loads(request.data) static_ip_obj.configure(data) except Exception as err: msg = ("Error adding static IPs for hotplugged interfaces. " "Data: %r. 
Error: %r" % (data, str(err))) logger.error(msg) - return json.dumps(dict(status=False, reason=msg)) + return jsonutils.dumps(dict(status=False, reason=msg)) else: - return json.dumps(dict(status=True)) + return jsonutils.dumps(dict(status=True)) @app.route('/del_static_ip', methods=['DELETE']) def del_static_ip(): try: static_ip_obj = StaticIp() - data = json.loads(request.data) + data = jsonutils.loads(request.data) static_ip_obj.clear(data) except Exception as err: msg = ("Error clearing static IPs for hotplugged interfaces. " "Data: %r. Error: %r" % (data, str(err))) logger.error(msg) - return json.dumps(dict(status=False, reason=msg)) + return jsonutils.dumps(dict(status=False, reason=msg)) else: - return json.dumps(dict(status=True)) + return jsonutils.dumps(dict(status=True)) @app.route('/add_rule', methods=['POST']) def add_rule(): # configuring sshd to listen on management ip address ip_addr = get_interface_to_bind() - oc_fw_module.run_sshd_on_mgmt_ip(ip_addr) + fw_module.run_sshd_on_mgmt_ip(ip_addr) - data = json.loads(request.data) + data = jsonutils.loads(request.data) try: EditPersistentRule.add(e, data) except Exception as err: logger.error("Error adding persistent rule %r" % str(err)) - return json.dumps(dict(status=False)) + return jsonutils.dumps(dict(status=False)) else: - return json.dumps(dict(status=True)) + return jsonutils.dumps(dict(status=True)) @app.route('/delete_rule', methods=['DELETE']) def del_rule(): - data = json.loads(request.data) + data = jsonutils.loads(request.data) try: EditPersistentRule.delete(e, data) except Exception as err: logger.error("Error deleting persistent rule %r" % str(err)) - return json.dumps(dict(status=False)) + return jsonutils.dumps(dict(status=False)) else: - return json.dumps(dict(status=True)) + return jsonutils.dumps(dict(status=True)) @app.route('/configure-rsyslog-as-client', methods=['POST']) def configure_rsyslog_as_client(): try: - config_data = json.loads(request.data) + config_data = 
jsonutils.loads(request.data) status = apihandler().configure_rsyslog_as_client(config_data) - return json.dumps(dict(status=status)) + return jsonutils.dumps(dict(status=status)) except Exception as ex: err = ("Error while conifiguring rsyslog client. Reason: %s" % ex) logger.error(err) - return json.dumps(dict(status=False, reason=err)) - -@app.route('/get-fw-stats', methods=['GET']) -def get_fw_stats(): - try: - mac_address = request.args.get('mac_address') - fw_stats = stats_apihandler().get_fw_stats(mac_address) - return json.dumps(dict(stats=fw_stats)) - except Exception as ex: - err = ("Error while getting firewall stats. Reason: %s" % ex) - logger.error(err) - return json.dumps(dict(status=False, reason=err)) - -@app.route('/get-vpn-stats', methods=['GET']) -def get_vpn_stats(): - try: - vpn_stats = stats_apihandler().get_vpn_stats() - return json.dumps(dict(stats=vpn_stats)) - except Exception as ex: - err = ("Error while getting vpn stats. Reason: %s" % ex) - logger.error(err) - return json.dumps(dict(status=False, reason=err)) + return jsonutils.dumps(dict(status=False, reason=err)) def handler(signum, frame): @@ -503,12 +421,14 @@ def handler(signum, frame): def add_management_pbr(): - command = 'grep "new_routers" /var/lib/dhcp3/dhclient_eth0_lease |tail -1| cut -d: -d "=" -f2' + command = ('grep "new_routers" /var/lib/dhcp3/dhclient_eth0_lease' + ' |tail -1| cut -d: -d "=" -f2') gateway_ip = os.popen(command).read().strip().strip("'") - command = 'grep "new_ip_address" /var/lib/dhcp3/dhclient_eth0_lease |tail -1| cut -d: -d "=" -f2' + command = ('grep "new_ip_address" /var/lib/dhcp3/dhclient_eth0_lease' + ' |tail -1| cut -d: -d "=" -f2') src_ip = os.popen(command).read().strip().strip("'") routes_info = [{'source_cidr': src_ip, 'gateway_ip': gateway_ip}] - routes_handler().add_source_route(json.dumps(routes_info)) + routes_handler().add_source_route(jsonutils.dumps(routes_info)) def getipaddr(): @@ -523,12 +443,12 @@ def get_interface_to_bind(): 
ip_addr = getipaddr() logger.info("Management interface up on - %r " % ''.join([netifaces.ifaddresses('eth0')[17][0][ - 'addr'][:2], - netifaces.ifaddresses('eth0')[17][0][ - 'addr'][-2:], - netifaces.ifaddresses('eth0')[2][0][ - 'addr'].split('.')[-1] - ])) + 'addr'][:2], + netifaces.ifaddresses('eth0')[17][0][ + 'addr'][-2:], + netifaces.ifaddresses('eth0')[2][0][ + 'addr'].split('.')[-1] + ])) except ValueError: logger.error("Management Interface not UP") time.sleep(5) @@ -545,9 +465,8 @@ def main(): :type ip_addr: Server listen address """ - global oc_fw_module, vyos_ha_config - oc_fw_module = OCFWConfigClass() - vyos_ha_config = VYOSHAConfig() + global fw_module + fw_module = VyosFWConfig() ip_addr = get_interface_to_bind() signal.signal(signal.SIGTERM, handler) signal.signal(signal.SIGINT, handler) @@ -557,4 +476,3 @@ def main(): if __name__ == '__main__': main() - diff --git a/gbpservice/nfp/service_vendor_agents/vyos/oc_config_server/static_ip.py b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/config_server/static_ip.py similarity index 84% rename from gbpservice/nfp/service_vendor_agents/vyos/oc_config_server/static_ip.py rename to gbpservice/nfp/service_vendor_agents/vyos/agent/src/config_server/static_ip.py index 223f55407a..af8c64d637 100644 --- a/gbpservice/nfp/service_vendor_agents/vyos/oc_config_server/static_ip.py +++ b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/config_server/static_ip.py @@ -1,11 +1,22 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. import logging import netifaces import time -from netifaces import AF_LINK -from operations import configOpts from execformat.executor import session +from netifaces import AF_LINK +from operations import ConfigOpts from vyos_session.utils import init_logger logger = logging.getLogger(__name__) @@ -19,7 +30,8 @@ """ -class StaticIp(configOpts): +class StaticIp(ConfigOpts): + def __init__(self): self.hotplug_timeout = 25 @@ -54,10 +66,10 @@ def _get_interface_name(self, interface_mac): interfaces = netifaces.interfaces() for interface in interfaces: - if netifaces.ifaddresses(interface)[AF_LINK][0]['addr'] == interface_mac: + if netifaces.ifaddresses(interface)[AF_LINK][0][ + 'addr'] == interface_mac: return interface - def configure(self, data): try: session.setup_config_session() @@ -82,11 +94,11 @@ def configure(self, data): self.provider_ptg_interfaces = list() for interface in interfaces: physical_interface = netifaces.ifaddresses( - interface).get(AF_LINK) + interface).get(AF_LINK) if not physical_interface: continue mac_addr = netifaces.ifaddresses( - interface)[AF_LINK][0]['addr'] + interface)[AF_LINK][0]['addr'] if 'eth' in interface: ip_mac_map.update({interface: mac_addr}) @@ -123,11 +135,11 @@ def clear(self, data): self.provider_ptg_interfaces = list() for interface in interfaces: physical_interface = netifaces.ifaddresses( - interface).get(AF_LINK) + interface).get(AF_LINK) if not physical_interface: continue mac_addr = netifaces.ifaddresses( - interface)[AF_LINK][0]['addr'] + interface)[AF_LINK][0]['addr'] if 'eth' in interface: ip_mac_map.update({interface: mac_addr}) diff --git a/gbpservice/nfp/service_vendor_agents/vyos/oc_config_server/vpn_api_server.py b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/config_server/vpn_api_server.py similarity index 86% rename from 
gbpservice/nfp/service_vendor_agents/vyos/oc_config_server/vpn_api_server.py rename to gbpservice/nfp/service_vendor_agents/vyos/agent/src/config_server/vpn_api_server.py index e4f7c28c18..aefdc8ec71 100644 --- a/gbpservice/nfp/service_vendor_agents/vyos/oc_config_server/vpn_api_server.py +++ b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/config_server/vpn_api_server.py @@ -1,28 +1,34 @@ -#!/usr/bin/env python +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import array +import copy +import fcntl import logging -import json -import netifaces -import netaddr import socket -import fcntl import struct -import array -import time -import ast -import copy import subprocess -import os -from netaddr import IPNetwork, IPAddress -from operations import configOpts -from vyos_session import utils -from netifaces import AF_INET, AF_INET6, AF_LINK, AF_PACKET, AF_BRIDGE -#from vyos_session.configsession import ConfigSession as session +import time + from execformat.executor import session +from netaddr import IPAddress +from netaddr import IPNetwork +from operations import ConfigOpts +from vyos_session import utils -OP_SUCCESS = True -OP_FAILED = False +SUCCESS = True +FAILED = False -OP_COMMAND_SCRIPT = "/usr/share/vyos-oc/vpn_op_commands.pl" +OP_COMMAND_SCRIPT = "/usr/share/vyos/vpn_op_commands.pl" IPSEC_SITE2SITE_COMMANDS = { 'ike': [ @@ -75,8 +81,7 @@ 'set interfaces openvpn %s server push-route %s', 'set interfaces openvpn %s openvpn-option \ "--client-cert-not-required 
--script-security 3 \ - --auth-user-pass-verify /usr/share/vyos-oc/auth_pam.pl via-file"'], - #'set interfaces openvpn %s local-host %s'], + --auth-user-pass-verify /usr/share/vyos/auth_pam.pl via-file"'], 'delete': [ 'delete interfaces openvpn %s', 'delete interfaces openvpn vtun0 server push-route %s']} @@ -86,11 +91,13 @@ class NoInterfaceOnCidr(Exception): + def __init__(self, **kwargs): self.message = _("No interface in the network '%(cidr)s'") % kwargs -class VPNHandler(configOpts): +class VPNHandler(ConfigOpts): + def __init__(self): super(VPNHandler, self).__init__() @@ -98,14 +105,14 @@ def create_ipsec_site_conn(self, ctx): session.setup_config_session() siteconn = ctx['siteconns'][0] self._create_ike_group(siteconn['ikepolicy'], - siteconn['connection']['dpd']) + siteconn['connection']['dpd']) self._create_esp_group(siteconn['ipsecpolicy']) self._create_ipsec_site_conn(ctx) session.commit() session.save() time.sleep(2) session.teardown_config_session() - return OP_SUCCESS + return SUCCESS def create_ipsec_site_tunnel(self, tunnel): session.setup_config_session() @@ -114,7 +121,7 @@ def create_ipsec_site_tunnel(self, tunnel): session.save() time.sleep(2) session.teardown_config_session() - return OP_SUCCESS + return SUCCESS def _ipsec_get_tunnel_idx(self, tunnel): command = 'perl' @@ -152,10 +159,10 @@ def delete_ipsec_site_tunnel(self, tunnel): session.save() time.sleep(2) session.teardown_config_session() - return OP_SUCCESS + return SUCCESS except Exception as ex: logger.error("Error in deleting ipsec site tunnel. %s" % ex) - return OP_FAILED + return FAILED def delete_ipsec_site_conn(self, peer_address): try: @@ -165,10 +172,10 @@ def delete_ipsec_site_conn(self, peer_address): session.save() time.sleep(2) session.teardown_config_session() - return OP_SUCCESS + return SUCCESS except Exception as ex: logger.error("Error in deleting ipsec site connection. 
%s" % ex) - return OP_FAILED + return FAILED def create_ssl_vpn_conn(self, ctx): session.setup_config_session() @@ -177,7 +184,7 @@ def create_ssl_vpn_conn(self, ctx): session.save() time.sleep(2) session.teardown_config_session() - return OP_SUCCESS + return SUCCESS def ssl_vpn_push_route(self, route): session.setup_config_session() @@ -186,7 +193,7 @@ def ssl_vpn_push_route(self, route): session.save() time.sleep(2) session.teardown_config_session() - return OP_SUCCESS + return SUCCESS def delete_ssl_vpn_conn(self, tunnel): session.setup_config_session() @@ -195,7 +202,7 @@ def delete_ssl_vpn_conn(self, tunnel): session.save() time.sleep(2) session.teardown_config_session() - return OP_SUCCESS + return SUCCESS def delete_ssl_vpn_route(self, route): session.setup_config_session() @@ -204,10 +211,10 @@ def delete_ssl_vpn_route(self, route): session.save() time.sleep(2) session.teardown_config_session() - return OP_SUCCESS + return SUCCESS def get_ssl_vpn_conn_state(self, peer_address): - return OP_SUCCESS, 'UP' + return SUCCESS, 'UP' def get_ipsec_site_tunnel_state(self, tunnel): tunidx = self._ipsec_get_tunnel_idx(tunnel) @@ -223,7 +230,7 @@ def get_ipsec_site_tunnel_state(self, tunnel): out, err = proc.communicate() state = out.split('=')[1] state = state[:-1] - return OP_SUCCESS, state + return SUCCESS, state def _delete_ipsec_site_tunnel(self, tunnel): tunidx = self._ipsec_get_tunnel_idx(tunnel) @@ -236,9 +243,6 @@ def _delete_ipsec_site_tunnel(self, tunnel): def _delete_ipsec_site_conn(self, peer_address): cmds = copy.deepcopy(IPSEC_SITE2SITE_COMMANDS) - #cmd = cmds['delete'][0] - - #cmd = cmd % peer_address cmd = cmds['delete'][2] self._set_commands([cmd]) @@ -259,8 +263,8 @@ def _delete_ssl_vpn_route(self, route): def _set_commands(self, cmds): for cmd in cmds: - print cmd - self.set_1(cmd.split(' ')) + logger.debug(cmd) + self.set_full(cmd.split(' ')) def _create_ike_group(self, ike, dpd): cmds = copy.deepcopy(IPSEC_SITE2SITE_COMMANDS) @@ -307,11 +311,6 @@ 
def _create_ipsec_site_tunnel(self, tunnel): self._set_commands(tun_cmds) - def _get_vrrp_group(self, ifname): - command = ("vbash -c -i 'show vrrp' | grep %s | awk '{print $2}'" % ifname) - #vrrp_ifname = ifname + "v" + os.popen(command).read().strip() - return os.popen(command).read().strip() - def _create_ipsec_site_conn(self, ctx): cmds = copy.deepcopy(IPSEC_SITE2SITE_COMMANDS) conn_cmds = cmds['conn'] @@ -327,17 +326,6 @@ def _create_ipsec_site_conn(self, ctx): esp = ctx['siteconns'][0]['ipsecpolicy'] ike = ctx['siteconns'][0]['ikepolicy'] - vrrp_cmd = None - if conn['stitching_fixed_ip'] and conn.get('standby_fip', None): - logger.debug("Get vrrp group number for interface %s" % ifname) - group_no = self._get_vrrp_group(ifname) - ip = conn['stitching_fixed_ip'] - vrrp_cmd = ('set interfaces ethernet %s vrrp vrrp-group %s ' - 'run-transition-scripts master /config/scripts/restart_vpn') % ( - ifname, group_no) - ifname = ifname + "v" + str(group_no) - logger.info("vrrp interface name: %s" % ifname) - conn_cmds[0] = conn_cmds[0] % (ifname) conn_cmds[1] = conn_cmds[1] % (conn['peer_address']) conn_cmds[2] = conn_cmds[2] % (conn['peer_address'], conn['psk']) @@ -355,8 +343,6 @@ def _create_ipsec_site_conn(self, ctx): conn_cmds[8] = conn_cmds[8] % ( conn['peer_address'], 1, conn['peer_cidrs'][0]) conn_cmds[9] = conn_cmds[9] % (conn['peer_address'], conn['access_ip']) - if vrrp_cmd: - conn_cmds.append(vrrp_cmd) self._set_commands(conn_cmds) @@ -377,7 +363,6 @@ def _create_ssl_vpn_conn(self, ctx): conn_cmds[6] = conn_cmds[6] % ('vtun0') conn_cmds[7] = conn_cmds[7] % ('vtun0', cidr) conn_cmds[8] = conn_cmds[8] % ('vtun0') - #conn_cmds[9] = conn_cmds[9] % ('vtun0', conn['stitching_fixed_ip']) self._set_commands(conn_cmds) @@ -395,19 +380,14 @@ def configure_static_route(self, action, cidr, gateway_ip): route_cmd = ("%s protocols static route %s next-hop" " %s distance 1" % (action, cidr, gateway_ip)) else: - route_cmd = "%s protocols static route %s" %(action, cidr) + 
route_cmd = "%s protocols static route %s" % (action, cidr) # The config module we use everywhere else is not used here # because of the issue mentioned here: # http://vyatta38.rssing.com/chan-10627532/all_p7.html # Note: The issue is inconsistent, but not seen anymore with this # new approach of setting configuration utils._alternate_set_and_commit(route_cmd) - #session.setup_config_session() - #self._set_commands([route_cmd]) - #session.commit() - #time.sleep(2) - #session.teardown_config_session() - return OP_SUCCESS + return SUCCESS def _get_all_ifs(self): max_possible = 128 # arbitrary. raise if needed. @@ -422,8 +402,8 @@ def _get_all_ifs(self): namestr = names.tostring() lst = [] for i in range(0, outbytes, 40): - name = namestr[i:i+16].split('\0', 1)[0] - ip = namestr[i+20:i+24] + name = namestr[i:i + 16].split('\0', 1)[0] + ip = namestr[i + 20:i + 24] lst.append((name, ip)) return lst diff --git a/gbpservice/nfp/service_vendor_agents/vyos/oc_config_server/vyos-oc-log b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/config_server/vyos-log similarity index 78% rename from gbpservice/nfp/service_vendor_agents/vyos/oc_config_server/vyos-oc-log rename to gbpservice/nfp/service_vendor_agents/vyos/agent/src/config_server/vyos-log index 25c48e9e1a..2e03d44323 100644 --- a/gbpservice/nfp/service_vendor_agents/vyos/oc_config_server/vyos-oc-log +++ b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/config_server/vyos-log @@ -1,4 +1,4 @@ -/var/log/oc/vyos_monitor { +/var/log/vyos/vyos_monitor { weekly rotate 12 size 10M @@ -8,7 +8,7 @@ notifempty create 644 vyos users } -/var/log/oc/oc-vyos.log { +/var/log/vyos/vyos.log { monthly rotate 12 size 10M @@ -18,7 +18,7 @@ notifempty create 644 vyos users postrotate - sudo /etc/init.d/oc-vyos restart + sudo /etc/init.d/vyos restart endscript } diff --git a/gbpservice/nfp/service_vendor_agents/vyos/agent/src/config_server/vyos_exception.py 
b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/config_server/vyos_exception.py new file mode 100644 index 0000000000..b8b7c6deaa --- /dev/null +++ b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/config_server/vyos_exception.py @@ -0,0 +1,29 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +class VyosException(Exception): + """ + """ + status_code = 400 + + def __init__(self, message, status_code=None, payload=None): + Exception.__init__(self) + self.message = message + if status_code: + self.status_code = status_code + self.payload = payload + + def to_dict(self): + rv = dict(self.payload or ()) + rv["message"] = self.message + return rv diff --git a/gbpservice/nfp/service_vendor_agents/vyos/oc_config_server/vyos_policy_based_routes.py b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/config_server/vyos_policy_based_routes.py similarity index 77% rename from gbpservice/nfp/service_vendor_agents/vyos/oc_config_server/vyos_policy_based_routes.py rename to gbpservice/nfp/service_vendor_agents/vyos/agent/src/config_server/vyos_policy_based_routes.py index 3bbdaabb2f..d883c2a12c 100644 --- a/gbpservice/nfp/service_vendor_agents/vyos/oc_config_server/vyos_policy_based_routes.py +++ b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/config_server/vyos_policy_based_routes.py @@ -1,16 +1,26 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + import copy -import json +import json as jsonutils import logging +import netaddr import netifaces import subprocess -import netaddr import time -from execformat.executor import session -from operations import configOpts +from operations import ConfigOpts from vyos_session import utils - ROUTING_TABLE_BASE = 10 logger = logging.getLogger(__name__) @@ -36,7 +46,8 @@ 'show interfaces ethernet %s policy route']} -class RoutesConfigHandler(configOpts): +class RoutesConfigHandler(ConfigOpts): + def __init__(self): super(RoutesConfigHandler, self).__init__() self.vyos_wrapper = "/opt/vyatta/sbin/vyatta-cfg-cmd-wrapper" @@ -47,13 +58,15 @@ def _run_command(self, command): stdout=subprocess.PIPE, stderr=subprocess.PIPE) except Exception as err: - message = 'Executing command %s failed with error %s' %(command, err) + message = 'Executing command %s failed with error %s' % ( + command, err) logger.error(message) return False cmd_output, cmd_error = exec_pipe.communicate() if exec_pipe.returncode != 0: - message = 'Executing command %s failed with error %s' %(command, cmd_error) + message = 'Executing command %s failed with error %s' % ( + command, cmd_error) logger.error(message) return False else: @@ -61,14 +74,14 @@ def _run_command(self, command): return True def _begin_command(self): - begin_cmd = "%s begin" %(self.vyos_wrapper) + begin_cmd = "%s begin" % (self.vyos_wrapper) if self._run_command(begin_cmd): return True else: return False def _discard_changes(self): - discard_cmd = "%s discard" %(self.vyos_wrapper) + discard_cmd = "%s discard" % 
(self.vyos_wrapper) if self._run_command(discard_cmd): return True else: @@ -76,13 +89,13 @@ def _discard_changes(self): def _set_commands(self, cmds): for cmd in cmds: - set_cmd = "%s %s" %(self.vyos_wrapper, cmd) + set_cmd = "%s %s" % (self.vyos_wrapper, cmd) if not self._run_command(set_cmd): return False return True def _commit_command(self): - commit_cmd = "%s commit" %(self.vyos_wrapper) + commit_cmd = "%s commit" % (self.vyos_wrapper) if self._run_command(commit_cmd): return True else: @@ -101,17 +114,17 @@ def _configure_vyos(self, configure_commands): return False if not self._set_commands(configure_commands): - logger.error("Executing commands %s failed" %(configure_commands)) + logger.error("Executing commands %s failed" % (configure_commands)) self._discard_changes() return False if not self._commit_command(): - logger.error("Committing %s failed" %(configure_commands)) + logger.error("Committing %s failed" % (configure_commands)) self._discard_changes() return False if not self._save_command(): - logger.error("Saving %s failed" %(configure_commands)) + logger.error("Saving %s failed" % (configure_commands)) self._discard_changes() return False @@ -120,16 +133,16 @@ def _configure_vyos(self, configure_commands): def _configure_policy_route(self, source_cidr, gateway_ip, source_interface): try: - interface_number_string = source_interface.split("eth",1)[1] + interface_number_string = source_interface.split("eth", 1)[1] except IndexError: logger.error("Retrieved wrong interface %s for configuring " - "routes" %(source_interface)) - msg = "Wrong interface %s retrieved for source %s" %( + "routes" % (source_interface)) + msg = "Wrong interface %s retrieved for source %s" % ( source_interface, source_cidr) raise Exception(msg) routing_table_number = ROUTING_TABLE_BASE + int( interface_number_string.split('v')[0]) - pbr_name = "%s_%s" %("pbr", source_interface) + pbr_name = "%s_%s" % ("pbr", source_interface) cmds = copy.deepcopy(VYOS_PBR_COMMANDS) 
pbr_commands = [] pbr_commands.append(cmds['policy_route'][0] % (pbr_name, "1")) @@ -138,11 +151,11 @@ def _configure_policy_route(self, source_cidr, gateway_ip, pbr_commands.append( cmds['policy_route'][2] % (pbr_name, "1", source_cidr)) - pbr_commands.append(cmds['table_route'][0] %( - routing_table_number, "0.0.0.0/0", gateway_ip)) + pbr_commands.append(cmds['table_route'][0] % ( + routing_table_number, "0.0.0.0/0", gateway_ip)) pbr_commands.append( - cmds['interface_pbr'][0] %(source_interface, pbr_name)) + cmds['interface_pbr'][0] % (source_interface, pbr_name)) if not self._configure_vyos(pbr_commands): logger.error("Configuring Policy Based Routing failed") @@ -151,7 +164,7 @@ def _configure_policy_route(self, source_cidr, gateway_ip, return True def add_source_route(self, routes_info): - routes_info = json.loads(routes_info) + routes_info = jsonutils.loads(routes_info) for route_info in routes_info: source_cidr = route_info['source_cidr'] gateway_ip = route_info['gateway_ip'] @@ -161,34 +174,35 @@ def add_source_route(self, routes_info): except Exception as err: logger.debug("Trying to clear any existing routes before " "setting source routing failed with error: %s" - %(err)) + % (err)) try: self._configure_policy_route( source_cidr, gateway_ip, source_interface) except Exception as err: message = ("Configuring Policy based route failed. 
" - "Error: %s" %(err)) + "Error: %s" % (err)) raise Exception(message) - return json.dumps(dict(status=True)) + return jsonutils.dumps(dict(status=True)) - # FIXME: When invoked on delete path we have to propagate the error + # REVISIT(Vikash): When invoked on delete path we have + # to propagate the error def _delete_policy_route(self, source_cidr, source_interface): try: - interface_number_string = source_interface.split("eth",1)[1] + interface_number_string = source_interface.split("eth", 1)[1] except IndexError: logger.error("Retrieved wrong interface %s for configuring " - "routes" %(source_interface)) - msg = "Wrong interface %s retrieved for source %s" %( + "routes" % (source_interface)) + msg = "Wrong interface %s retrieved for source %s" % ( source_interface, source_cidr) raise Exception(msg) routing_table_number = ROUTING_TABLE_BASE + int( interface_number_string.split('v')[0]) - pbr_name = "%s_%s" %("pbr", source_interface) + pbr_name = "%s_%s" % ("pbr", source_interface) cmds = copy.deepcopy(VYOS_PBR_COMMANDS) delete_pbr_commands = [] delete_pbr_commands.append(cmds['delete'][0] % ( - source_interface, pbr_name)) + source_interface, pbr_name)) if not self._configure_vyos(delete_pbr_commands): logger.warn("Deleting PBR failed") @@ -205,14 +219,14 @@ def _delete_policy_route(self, source_cidr, source_interface): return def delete_source_route(self, routes_info): - routes_info = json.loads(routes_info) + routes_info = jsonutils.loads(routes_info) for route_info in routes_info: source_cidr = route_info['source_cidr'] source_interface = self._get_if_name_by_cidr(source_cidr, delete=True) if source_interface: self._delete_policy_route(source_cidr, source_interface) - return json.dumps(dict(status=True)) + return jsonutils.dumps(dict(status=True)) def _get_if_name_by_cidr(self, cidr, delete=False): interfaces = netifaces.interfaces() @@ -228,10 +242,11 @@ def _get_if_name_by_cidr(self, cidr, delete=False): netmask = inet_info.get('netmask') ip_address = 
inet_info.get('addr') subnet_prefix = cidr.split("/") - if (ip_address == subnet_prefix[0] and - (len(subnet_prefix) == 1 or subnet_prefix[1] == "32")): + if (ip_address == subnet_prefix[0] and ( + len(subnet_prefix) == 1 or ( + subnet_prefix[1] == "32"))): return interface - ip_address_netmask = '%s/%s' %(ip_address, netmask) + ip_address_netmask = '%s/%s' % (ip_address, netmask) interface_cidr = netaddr.IPNetwork(ip_address_netmask) if str(interface_cidr.cidr) == cidr: return interface diff --git a/gbpservice/nfp/service_vendor_agents/vyos/execformat/__init__.py b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/execformat/__init__.py similarity index 100% rename from gbpservice/nfp/service_vendor_agents/vyos/execformat/__init__.py rename to gbpservice/nfp/service_vendor_agents/vyos/agent/src/execformat/__init__.py diff --git a/gbpservice/nfp/service_vendor_agents/vyos/execformat/executor.py b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/execformat/executor.py similarity index 59% rename from gbpservice/nfp/service_vendor_agents/vyos/execformat/executor.py rename to gbpservice/nfp/service_vendor_agents/vyos/agent/src/execformat/executor.py index f7b2b39b0a..c091b078e8 100755 --- a/gbpservice/nfp/service_vendor_agents/vyos/execformat/executor.py +++ b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/execformat/executor.py @@ -1,14 +1,23 @@ -#!/usr/bin/env python -import sys -import subprocess -import os +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
-#sys.path.append('/home/vyos/vyos-api/project/') -from vyos_session.configsession import ConfigSession, SessionNotExists, \ - SetupSessionFailed -from vyos_session import utils import logging +import os import shlex +import subprocess + +from vyos_session.configsession import ConfigSession +from vyos_session.configsession import SessionNotExists +from vyos_session import utils logger = logging.getLogger(__name__) utils.init_logger(logger) @@ -18,23 +27,33 @@ except Exception as err: logger.error('A session exist already !') -VYOS_SBIN_DIR = utils.get_config_params('bin','vyos_sbin_dir') +VYOS_SBIN_DIR = utils.get_config_params('bin', 'vyos_sbin_dir') VYOS_SHELL_API = utils.get_config_params('bin', 'shell_api_path') -class OperationFailed(Exception): pass -class OperationNameError(Exception): pass -class ConfigPathNotCorrect(Exception): pass + +class OperationFailed(Exception): + pass + + +class OperationNameError(Exception): + pass + + +class ConfigPathNotCorrect(Exception): + pass + def check_operation_name(args): """ Check if operation/command name is correct. """ if len(args) == 0: logger.error('Operation name required') raise OperationNameError('Operation name required.') - elif args[0] not in ['show','set','delete', 'edit']: + elif args[0] not in ['show', 'set', 'delete', 'edit']: logger.error('Operation name "%s" not correct' % args[0]) raise OperationNameError('Operation name not correct.') return True + def _runner(command): """ Run shell commands via subprocess.Popen() @@ -42,22 +61,29 @@ def _runner(command): # NOTE: # if Popen(self.args, shell=True, ...) => Execution fails # if Popen(self.args, ...) => OSError: [Errno 2] No such file or directory - # if self.args = ['/bin/cli-shell-api','showCfg', ...] and Popen(self.args, ...) that works but actually we keep using ' '.join(self.args). - proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + # if self.args = ['/bin/cli-shell-api','showCfg', ...] 
and + # Popen(self.args, ...) that works but actually we keep using ' + # '.join(self.args). + proc = subprocess.Popen(command, shell=True, + stdout=subprocess.PIPE, stderr=subprocess.PIPE) # wait for the process to terminate and get stdout/stderr outputs out, err = proc.communicate() return out, err, proc.returncode + def _op_command(command=None): command = './op_commands.sh' command += " " + "\"run show vpn ipsec sa\"" - proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - out,err = proc.communicate() + proc = subprocess.Popen(command, shell=True, + stdout=subprocess.PIPE, stderr=subprocess.PIPE) + out, err = proc.communicate() return out -class execUtils: + +class execUtils(object): """ Executes possible operations in a Vyos configure session.""" + def __init__(self, args): self.args = args @@ -71,27 +97,31 @@ def execmd(self, nonsession=False): logger.info('Perform operation "%s"' % operation_name) if not nonsession: - if self.args[0] == 'show': self.args[0] = '{0} showCfg'.format( - VYOS_SHELL_API) - else: self.args[0] = os.path.join(VYOS_SBIN_DIR, - 'my_{0}'.format(self.args[0])) + if self.args[0] == 'show': + self.args[0] = '{0} showCfg'.format( + VYOS_SHELL_API) + else: + self.args[0] = os.path.join(VYOS_SBIN_DIR, + 'my_{0}'.format(self.args[0])) logger.debug('exec command: "%s"' % ' '.join(self.args)) - + if not nonsession and not session.session_exists(): raise SessionNotExists('Configure session do not exists') if not nonsession: - result = _runner(' '.join(self.args)) # result = (stdout, stderr, errcode) + result = _runner(' '.join(self.args)) else: - result = _op_command() - return (True, result) + result = _op_command() + return (True, result) logger.debug('command return code: %s' % result[2]) if result[2]: - logger.info('command output: %s' % ' '.join(result[0].splitlines())) - logger.error('Failed executing operation "%s"' % operation_name) - raise OperationFailed('Operation failed !') + 
logger.info('command output: %s' % + ' '.join(result[0].splitlines())) + logger.error('Failed executing operation "%s"' % + operation_name) + raise OperationFailed('Operation failed !') logger.debug('%s' % ' '.join(result[0].splitlines())) logger.info('Executing "%s" operation OK' % operation_name) return (True, result[0]) @@ -105,7 +135,7 @@ def check_cmd_args(self): logger.info('config path: "%s"' % config_path) cmd = '{0} exists {1}'.format(VYOS_SHELL_API, config_path) logger.debug('exec command: "%s"' % cmd) - result = _runner(cmd) # result = (stdout, stderr, errcode) + result = _runner(cmd) logger.debug('command return code: %s' % result[2]) if result[2]: logger.error('Configuration path is not correct') @@ -113,41 +143,21 @@ def check_cmd_args(self): logger.info('Configuration path is correct') return True - def check_cmd_args(self): - """ - Check that config path is correct before performing execmd() - """ - logger.info('Check specified configuration path existance') - config_path = ' '.join(self.args[1:]) - logger.info('config path: "%s"' % config_path) - cmd = '{0} exists {1}'.format(VYOS_SHELL_API, config_path) - logger.debug('exec command: "%s"' % cmd) - proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - out, err = proc.communicate() - errcode = proc.returncode - logger.debug('command return code: %s' % errcode) - if errcode: - logger.error('Configuration path is not correct') - raise ConfigPathNotCorrect('Configuration path is not correct') - logger.info('Configuration path is correct') - return True - def get_possible_options(self): """ Returns list of nodes under specified configuration path """ out = [] try: - self.check_cmd_args() # check config path validation + self.check_cmd_args() # check config path validation except ConfigPathNotCorrect: - return False, out # config path is not correct + return False, out # config path is not correct config_path = ' '.join(self.args[1:]) logger.info('Get possible 
options of config path "%s"' % config_path) cmd = '{0} listNodes {1}'.format(VYOS_SHELL_API, config_path) logger.debug('exec command: "%s"' % cmd) - result = _runner(cmd) # rst = (stdout, stderr, errcode) + result = _runner(cmd) # rst = (stdout, stderr, errcode) logger.debug('command return code: %s' % result[2]) if not result[0]: logger.info('No more options for the specified config path') diff --git a/gbpservice/nfp/service_vendor_agents/vyos/agent/src/execformat/formator.py b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/execformat/formator.py new file mode 100755 index 0000000000..54644b6c68 --- /dev/null +++ b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/execformat/formator.py @@ -0,0 +1,51 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os +import sys + +from executor import execUtils as executor +from executor import OperationFailed +from vyos_session.utils import logger +from vyosparser import vyos_parser as vparser + +topdir = os.path.dirname(os.path.realpath(__file__)) + "../.." 
+topdir = os.path.realpath(topdir) +sys.path.insert(0, topdir) + + +class ServiceError(Exception): + pass + + +class ShowConfig(object): + + def formator(self, options): + args = ['show'] + service = options[0] + logger.debug("=====>>>>>> args before executor call = %s" % args) + if service in ['protocols', 'nat', 'interfaces', 'firewall']: + args.extend(options) + elif service in ['dns', 'dhcp-server', 'ssh', 'webproxy']: + options.insert(0, 'service') + args.extend(options) + else: + raise ServiceError('unknown such service!') + exe = executor(list(args)) + try: + execstate, output = exe.execmd() + logger.debug("=====>>>>>> args after executor call = %s" % args) + except OperationFailed as e: + logger.error(e.message) + return False + if execstate: + return vparser.decode_string(output) diff --git a/gbpservice/nfp/service_vendor_agents/vyos/agent/src/license_readme b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/license_readme new file mode 100644 index 0000000000..23e8babb85 --- /dev/null +++ b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/license_readme @@ -0,0 +1,14 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +configsession.py and utils.py are open source files and originally taken from +"https://github.com/abessifi/pyatta". 
\ No newline at end of file diff --git a/gbpservice/nfp/service_vendor_agents/vyos/agent/src/op_commands.sh b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/op_commands.sh new file mode 100755 index 0000000000..7be7e16076 --- /dev/null +++ b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/op_commands.sh @@ -0,0 +1,18 @@ +#!/bin/vbash + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +cmd1="$1" +source /opt/vyatta/etc/functions/script-template +eval "$cmd1" +echo $? diff --git a/gbpservice/nfp/service_vendor_agents/vyos/vpn_op_commands.pl b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/vpn_op_commands.pl similarity index 69% rename from gbpservice/nfp/service_vendor_agents/vyos/vpn_op_commands.pl rename to gbpservice/nfp/service_vendor_agents/vyos/agent/src/vpn_op_commands.pl index 5c6317a604..dc60021838 100644 --- a/gbpservice/nfp/service_vendor_agents/vyos/vpn_op_commands.pl +++ b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/vpn_op_commands.pl @@ -1,8 +1,18 @@ #!/usr/bin/perl -#Usage: sudo perl op_commands.pl get_tunnel_state peer_ip tunnel-id - +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +#Usage: sudo perl op_commands.pl get_tunnel_state peer_ip tunnel-id use lib "/opt/vyatta/share/perl5/"; use Vyatta::VPN::OPMode; use Data::Dumper qw(Dumper); @@ -45,7 +55,6 @@ sub get_ipsec_tunnel_idx { sub get_ipsec_tunnel_state { my @args = @_; - #args[0] will be subroutine name my $peer = $args[1]; my $tunnel = $args[2]; @@ -60,7 +69,6 @@ sub get_ipsec_tunnel_state { return $state } -#print Dumper \@ARGV; my $call=$ARGV[0]; $call->(@ARGV); diff --git a/gbpservice/nfp/base_configurator/api/root_controller.py b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/vyos-pbr/README similarity index 50% rename from gbpservice/nfp/base_configurator/api/root_controller.py rename to gbpservice/nfp/service_vendor_agents/vyos/agent/src/vyos-pbr/README index a2cc5d4dce..51b6206f2b 100644 --- a/gbpservice/nfp/base_configurator/api/root_controller.py +++ b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/vyos-pbr/README @@ -10,22 +10,17 @@ # License for the specific language governing permissions and limitations # under the License. 
-import pecan -from v1 import controllers +1) Add the following line in /etc/rc.local file as give in sample rc.local file + sudo bash /usr/share/vyos-pbr/pbr_init & +2) Modify the interfaces file to looks like given sample interfaces file +3) mkdir -p /usr/share/vyos-pbr -class RootController(object): - """This is root controller that forward the request to __init__.py - file inside controller folder inside v1 +4) copy pbr_init and pbr to /usr/share/vyos-pbr - """ - v1 = controllers.V1Controller() - - @pecan.expose() - def get(self): - # TODO(blogan): once a decision is made on how to do versions, do that - # here - return {'versions': [{'status': 'CURRENT', - 'updated': '2014-12-11T00:00:00Z', - 'id': 'v1'}]} +vyos agent (server.py) adds route to the controller node received from pushed +configuration, and also writes a route in the +/usr/share/vyos-pbr/controller_route file. This is to make the added route +persistent even if the instance is rebooted. +controller_route is called from pbr_init file. 
diff --git a/gbpservice/nfp/service_vendor_agents/vyos/oc-pbr/controller_route b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/vyos-pbr/controller_route similarity index 100% rename from gbpservice/nfp/service_vendor_agents/vyos/oc-pbr/controller_route rename to gbpservice/nfp/service_vendor_agents/vyos/agent/src/vyos-pbr/controller_route diff --git a/gbpservice/nfp/service_vendor_agents/vyos/oc-pbr/dhclient-script b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/vyos-pbr/dhclient-script similarity index 100% rename from gbpservice/nfp/service_vendor_agents/vyos/oc-pbr/dhclient-script rename to gbpservice/nfp/service_vendor_agents/vyos/agent/src/vyos-pbr/dhclient-script diff --git a/gbpservice/nfp/service_vendor_agents/vyos/agent/src/vyos-pbr/interface-post-up b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/vyos-pbr/interface-post-up new file mode 100755 index 0000000000..1410b199dd --- /dev/null +++ b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/vyos-pbr/interface-post-up @@ -0,0 +1,14 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +/sbin/route del default dev $IFACE +echo 1 > /proc/sys/net/ipv4/conf/$IFACE/arp_ignore diff --git a/gbpservice/nfp/service_vendor_agents/vyos/oc-pbr/interfaces b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/vyos-pbr/interfaces similarity index 100% rename from gbpservice/nfp/service_vendor_agents/vyos/oc-pbr/interfaces rename to gbpservice/nfp/service_vendor_agents/vyos/agent/src/vyos-pbr/interfaces diff --git a/gbpservice/nfp/service_vendor_agents/vyos/agent/src/vyos-pbr/management_pbr b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/vyos-pbr/management_pbr new file mode 100755 index 0000000000..c76dd95712 --- /dev/null +++ b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/vyos-pbr/management_pbr @@ -0,0 +1,24 @@ +#!/bin/bash + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +echo "dhclient: $reason" +case $reason in + BOUND|RENEW|REBIND|REBOOT) + if [ "eth0" == $interface ]; then + sudo bash /usr/share/vyos-pbr/pbr_init & + echo "Management pbr is set" + fi + ;; +esac + diff --git a/gbpservice/nfp/service_vendor_agents/vyos/agent/src/vyos-pbr/no-default-route b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/vyos-pbr/no-default-route new file mode 100755 index 0000000000..1b99349ba2 --- /dev/null +++ b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/vyos-pbr/no-default-route @@ -0,0 +1,24 @@ +#!/bin/bash + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +case $reason in + BOUND|RENEW|REBIND|REBOOT) + if [ "eth0" == $interface ]; then + echo $new_routers > /usr/share/vyos-pbr/eth0_route + else + unset new_routers + fi + echo "Default gateway has been cleared" + ;; +esac diff --git a/gbpservice/nfp/service_vendor_agents/vyos/agent/src/vyos-pbr/pbr b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/vyos-pbr/pbr new file mode 100755 index 0000000000..50ce788225 --- /dev/null +++ b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/vyos-pbr/pbr @@ -0,0 +1,15 @@ +#!/bin/vbash + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +sudo ip route add default via $@ diff --git a/gbpservice/nfp/service_vendor_agents/vyos/oc-pbr/pbr_init b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/vyos-pbr/pbr_init similarity index 68% rename from gbpservice/nfp/service_vendor_agents/vyos/oc-pbr/pbr_init rename to gbpservice/nfp/service_vendor_agents/vyos/agent/src/vyos-pbr/pbr_init index e3fe31c7ce..daf8069611 100755 --- a/gbpservice/nfp/service_vendor_agents/vyos/oc-pbr/pbr_init +++ b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/vyos-pbr/pbr_init @@ -1,5 +1,17 @@ #!/bin/bash +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ #Converts decimal to ip address function dec2ip () { local ip dec=$1 @@ -21,7 +33,6 @@ function ip2dec () { } -#sleep 20 flag=0 PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin all_interfaces=`/sbin/ifconfig | grep "Link encap" | grep -v "127.0.0.1" |cut -d: -d " " -f 1` @@ -34,9 +45,7 @@ for i in $all_interfaces; do ip_addr=`/sbin/ifconfig $i | grep "inet " | awk -F'[: ]+' '{ print $4 }'` if [ $ip_addr ] then - #echo "GOT IP for interface $i" - #flag=1 - break + break fi done bcast_ip=`/sbin/ifconfig $i | grep "inet " | awk -F'[: ]+' '{ print $6 }'`; @@ -49,10 +58,8 @@ for i in $all_interfaces; do ip_addresses[$index]=$ip_addr network_id[$index]=$net_ip filename="/var/lib/dhcp3/dhclient_"$i"_lease" - #route=`grep "option routers" $filename |tail -1| cut -d: -d " " -f5` route=`grep "new_routers" $filename |tail -1| cut -d: -d "=" -f2| tr -d "'"` default_route[$index]=$route - #echo "$index ${default_route[$index]}" if [ "$i" == "eth0" ] && [ $flag == 0 ] then table_name=$i"_table" @@ -66,33 +73,18 @@ for i in $all_interfaces; do `ip rule del from ${ip_addresses[$index]} table $table_name` `ip rule add from ${ip_addresses[$index]} table $table_name` echo 0 > /proc/sys/net/ipv4/conf/eth0/accept_source_route - #echo "configured pbr for interface $i" fi - #index=`expr $index + 1` ((index++)) fi done index=0 for interface in ${interfaces[*]}; do - #echo "$index ${default_route[$index]}" if [ "$interface" == "eth1" ] && [ $flag == 0 ] then - #echo "Set default route on $interface ${default_route[$index]} $index" - cmd="bash /usr/share/oc-pbr/pbr ${default_route[$index]}" - #sudo su - vyos -c "$cmd" - #/opt/vyatta/sbin/vyatta-cfg-cmd-wrapper begin - #/opt/vyatta/sbin/vyatta-cfg-cmd-wrapper set protocols static route 0.0.0.0/0 next-hop ${default_route[$index]} - #/opt/vyatta/sbin/vyatta-cfg-cmd-wrapper commit - #/opt/vyatta/sbin/vyatta-cfg-cmd-wrapper end - #source /opt/vyatta/etc/functions/script-template - #eval "set protocols static route 0.0.0.0/0 
next-hop ${default_route[$index]}" - #eval "commit" - #eval "exit" + cmd="bash /usr/share/vyos-pbr/pbr ${default_route[$index]}" fi - #index=`expr $index + 1` ((index++)) done -#bash /usr/share/oc-pbr/controller_route echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore diff --git a/gbpservice/nfp/service_vendor_agents/vyos/oc-pbr/rc.local b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/vyos-pbr/rc.local similarity index 92% rename from gbpservice/nfp/service_vendor_agents/vyos/oc-pbr/rc.local rename to gbpservice/nfp/service_vendor_agents/vyos/agent/src/vyos-pbr/rc.local index 2a68541436..76a4b9b4f7 100755 --- a/gbpservice/nfp/service_vendor_agents/vyos/oc-pbr/rc.local +++ b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/vyos-pbr/rc.local @@ -15,5 +15,5 @@ # get preserved for the new image during image upgrade. POSTCONFIG=/opt/vyatta/etc/config/scripts/vyatta-postconfig-bootup.script [ -x $POSTCONFIG ] && $POSTCONFIG -sudo bash /usr/share/oc-pbr/pbr_init & +sudo bash /usr/share/vyos-pbr/pbr_init & exit 0 diff --git a/gbpservice/nfp/service_vendor_agents/vyos/agent/src/vyos.conf b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/vyos.conf new file mode 100644 index 0000000000..4a2982b609 --- /dev/null +++ b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/vyos.conf @@ -0,0 +1,20 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +[bin] +vyos_sbin_dir = /opt/vyatta/sbin +shell_api_path = /bin/cli-shell-api + +[log] +logdir=/var/log/vyos +logfile=vyos.log +level=DEBUG diff --git a/gbpservice/nfp/service_vendor_agents/vyos/agent/src/vyos_init_script/restart_vpn b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/vyos_init_script/restart_vpn new file mode 100644 index 0000000000..7981efb427 --- /dev/null +++ b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/vyos_init_script/restart_vpn @@ -0,0 +1,16 @@ +#!/bin/vbash + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +sudo rm /var/run/pluto.pid +vbash -ic 'restart vpn' diff --git a/gbpservice/nfp/service_vendor_agents/vyos/vyos_session/__init__.py b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/vyos_session/__init__.py similarity index 100% rename from gbpservice/nfp/service_vendor_agents/vyos/vyos_session/__init__.py rename to gbpservice/nfp/service_vendor_agents/vyos/agent/src/vyos_session/__init__.py diff --git a/gbpservice/nfp/service_vendor_agents/vyos/vyos_session/configsession.py b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/vyos_session/configsession.py similarity index 80% rename from gbpservice/nfp/service_vendor_agents/vyos/vyos_session/configsession.py rename to gbpservice/nfp/service_vendor_agents/vyos/agent/src/vyos_session/configsession.py index 9b0e666e66..3bfb0a0f0d 100644 --- a/gbpservice/nfp/service_vendor_agents/vyos/vyos_session/configsession.py +++ b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/vyos_session/configsession.py @@ -1,146 +1,169 @@ -import os -from uuid import uuid4 -from utils import get_config_params, _run, clean_environ, init_logger -import logging - -logger = logging.getLogger(__name__) -init_logger(logger) - -VYOS_SHELL_API = get_config_params('bin', 'shell_api_path') -VYOS_SBIN_DIR = get_config_params('bin', 'vyos_sbin_dir') -VYOS_SAVE_SCRIPT = 'vyatta-save-config.pl' - -# Create/Get the logger object -# logger = init_logger() - - -class SessionAlreadyExists(Exception): pass -class SetupSessionFailed(Exception): pass -class OperationFailed(Exception): pass -class SessionNotExists(Exception): pass - - -class Session(object): - """ - Return the session instance if exists. Else, create new one. - SessionAlreadyExists exception raised on the second instantiation. 
- """ - _ref = None - - def __new__(cls, *args, **kw): - if cls._ref is not None: - raise SessionAlreadyExists('A session exist already !') - cls._ref = super(Session, cls).__new__(cls, *args, **kw) - return cls._ref - - -class ConfigSession(Session): - """ - Create and manage a Vyos config session. - This is a singleton subclass of Session class which ensures that one and - one config session only is opened. - To create instance you have to call setup_config_session() method. - """ - - def setup_config_session(self): - """ - Setup vyos session. A random uuid is generated as a sesssion identifier - ($PPID -Shell PID- could be used as well). - """ - - identifier = uuid4() - env = dict() - env['VYATTA_CHANGES_ONLY_DIR'] = \ - '/opt/vyatta/config/tmp/changes_only_{0}'.format(identifier) - env['VYATTA_CONFIG_TEMPLATE'] = '/opt/vyatta/share/vyatta-cfg/templates' - env['VYATTA_ACTIVE_CONFIGURATION_DIR'] = '/opt/vyatta/config/active' - env['VYATTA_EDIT_LEVEL'] = '/' - env['VYATTA_TEMP_CONFIG_DIR'] = '/opt/vyatta/config/tmp/new_config_{' \ - '0}'.format(identifier) - env['VYATTA_TEMPLATE_LEVEL'] = '/' - env['VYATTA_CONFIG_TMP'] = '/opt/vyatta/config/tmp/tmp_{0}'.format( - identifier) - # Add vyos session environment to system environment. - # This is not good but actually it seems that is the only way to - # handle a persistant vyos session after spawning a shell. 
- os.environ.update(env) - logger.info('Setting up a configuration session for Vyos') - # Spawn shell and setup vyos config session - if _run('{0} setupSession'.format(VYOS_SHELL_API)): - # Unset vyos session environment and raise an exception - logger.error('Could not create configuration session') - logger.info('Cleaning up session environment variables') - clean_environ(env) - raise SetupSessionFailed('Could not create session !') - self.session_id = identifier - self.session_envs = env - logger.debug('Session identifier is %s', identifier) - logger.debug('Session environment variables: %s', env) - logger.info('Configuration session is set up') - return True - - def session_exists(self): - """ - Test if a vyos config session is set up - """ - return False if _run('{0} inSession'.format(VYOS_SHELL_API)) else True - - def teardown_config_session(self): - """ - End current configuration session. - """ - if not self.session_exists(): - logger.warn('Teardown failed. No session available !') - return False - - if not _run('{0} teardownSession'.format(VYOS_SHELL_API)): - logger.info('Cleaning up session environment variables') - logger.info('Closing Vyos config session') - clean_environ(self.session_envs) - return True - - logger.error('Failed to teardown current config session') - logger.warn('The Vyos config session may still open !') - return False - - def session_changed(self): - """ - Returns if Vyos configuration was changed from current session - """ - if _run('{0} sessionChanged'.format(VYOS_SHELL_API)): - return False - logger.warn('Vyos configuration was changed from current session') - return True - - def commit(self): - """ - Returns True if commit action succeed. False otherwise. 
- """ - out = _run(os.path.join(VYOS_SBIN_DIR ,'my_commit -l'), output=True) - if not out: - logger.error('Commit changes failed') - raise OperationFailed('[ERROR] Commit changes failed !') - logger.info('Changes successfully commited') - return True - - def discard(self): - """ - Undo config modifications - """ - out = _run(os.path.join(VYOS_SBIN_DIR ,'my_discard'), output=True) - if not out: - raise OperationFailed('[ERROR] Discard changes failed !') - # return out.splitlines()[0] - return out - - def save(self): - """ - Save applied modifications. Changes still persistent even after - system reboot. - """ - out = _run(os.path.join(VYOS_SBIN_DIR, VYOS_SAVE_SCRIPT), output=True) - if not out: - logger.error('Saving changes failed') - raise OperationFailed('[ERROR] Save changes failed !') - logger.info('%s', out) - return True +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import logging +import os +from uuid import uuid4 + +from utils import _run +from utils import clean_environ +from utils import get_config_params +from utils import init_logger + +logger = logging.getLogger(__name__) +init_logger(logger) + +VYOS_SHELL_API = get_config_params('bin', 'shell_api_path') +VYOS_SBIN_DIR = get_config_params('bin', 'vyos_sbin_dir') +VYOS_SAVE_SCRIPT = 'vyatta-save-config.pl' + + +class SessionAlreadyExists(Exception): + pass + + +class SetupSessionFailed(Exception): + pass + + +class OperationFailed(Exception): + pass + + +class SessionNotExists(Exception): + pass + + +class Session(object): + """ + Return the session instance if exists. Else, create new one. + SessionAlreadyExists exception raised on the second instantiation. + """ + _ref = None + + def __new__(cls, *args, **kw): + if cls._ref is not None: + raise SessionAlreadyExists('A session exist already !') + cls._ref = super(Session, cls).__new__(cls, *args, **kw) + return cls._ref + + +class ConfigSession(Session): + """ + Create and manage a Vyos config session. + This is a singleton subclass of Session class which ensures that one and + one config session only is opened. + To create instance you have to call setup_config_session() method. + """ + + def setup_config_session(self): + """ + Setup vyos session. A random uuid is generated as a sesssion identifier + ($PPID -Shell PID- could be used as well). 
+ """ + + identifier = uuid4() + env = dict() + env['VYATTA_CHANGES_ONLY_DIR'] = \ + '/opt/vyatta/config/tmp/changes_only_{0}'.format(identifier) + env['VYATTA_CONFIG_TEMPLATE'] = ('/opt/vyatta/share/' + 'vyatta-cfg/templates') + env['VYATTA_ACTIVE_CONFIGURATION_DIR'] = '/opt/vyatta/config/active' + env['VYATTA_EDIT_LEVEL'] = '/' + env['VYATTA_TEMP_CONFIG_DIR'] = '/opt/vyatta/config/tmp/new_config_{' \ + '0}'.format(identifier) + env['VYATTA_TEMPLATE_LEVEL'] = '/' + env['VYATTA_CONFIG_TMP'] = '/opt/vyatta/config/tmp/tmp_{0}'.format( + identifier) + # Add vyos session environment to system environment. + # This is not good but actually it seems that is the only way to + # handle a persistant vyos session after spawning a shell. + os.environ.update(env) + logger.info('Setting up a configuration session for Vyos') + # Spawn shell and setup vyos config session + if _run('{0} setupSession'.format(VYOS_SHELL_API)): + # Unset vyos session environment and raise an exception + logger.error('Could not create configuration session') + logger.info('Cleaning up session environment variables') + clean_environ(env) + raise SetupSessionFailed('Could not create session !') + self.session_id = identifier + self.session_envs = env + logger.debug('Session identifier is %s', identifier) + logger.debug('Session environment variables: %s', env) + logger.info('Configuration session is set up') + return True + + def session_exists(self): + """ + Test if a vyos config session is set up + """ + return False if _run('{0} inSession'.format(VYOS_SHELL_API)) else True + + def teardown_config_session(self): + """ + End current configuration session. + """ + if not self.session_exists(): + logger.warn('Teardown failed. 
No session available !') + return False + + if not _run('{0} teardownSession'.format(VYOS_SHELL_API)): + logger.info('Cleaning up session environment variables') + logger.info('Closing Vyos config session') + clean_environ(self.session_envs) + return True + + logger.error('Failed to teardown current config session') + logger.warn('The Vyos config session may still open !') + return False + + def session_changed(self): + """ + Returns if Vyos configuration was changed from current session + """ + if _run('{0} sessionChanged'.format(VYOS_SHELL_API)): + return False + logger.warn('Vyos configuration was changed from current session') + return True + + def commit(self): + """ + Returns True if commit action succeed. False otherwise. + """ + out = _run(os.path.join(VYOS_SBIN_DIR, 'my_commit -l'), output=True) + if not out: + logger.error('Commit changes failed') + raise OperationFailed('[ERROR] Commit changes failed !') + logger.info('Changes successfully commited') + return True + + def discard(self): + """ + Undo config modifications + """ + out = _run(os.path.join(VYOS_SBIN_DIR, 'my_discard'), output=True) + if not out: + raise OperationFailed('[ERROR] Discard changes failed !') + return out + + def save(self): + """ + Save applied modifications. Changes still persistent even after + system reboot. 
+ """ + out = _run(os.path.join(VYOS_SBIN_DIR, VYOS_SAVE_SCRIPT), output=True) + if not out: + logger.error('Saving changes failed') + raise OperationFailed('[ERROR] Save changes failed !') + logger.info('%s', out) + return True diff --git a/gbpservice/nfp/service_vendor_agents/vyos/vyos_session/utils.py b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/vyos_session/utils.py similarity index 76% rename from gbpservice/nfp/service_vendor_agents/vyos/vyos_session/utils.py rename to gbpservice/nfp/service_vendor_agents/vyos/agent/src/vyos_session/utils.py index 6dc579aae2..02069e5e48 100644 --- a/gbpservice/nfp/service_vendor_agents/vyos/vyos_session/utils.py +++ b/gbpservice/nfp/service_vendor_agents/vyos/agent/src/vyos_session/utils.py @@ -1,13 +1,25 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ import ConfigParser -import subprocess -import os import logging import logging.handlers as handlers +import os +import subprocess # In production environment CONFIG_DIR should be /etc/pyatta/ -CONFIG_DIR = "/usr/share/vyos-oc" -CONFIG_FILE_NAME = "oc-vyos.conf" -AVAILABLE_LOG_LEVELS = ['DEBUG','INFO','WARN','ERROR','CRITICAL'] +CONFIG_DIR = "/usr/share/vyos" +CONFIG_FILE_NAME = "vyos.conf" +AVAILABLE_LOG_LEVELS = ['DEBUG', 'INFO', 'WARN', 'ERROR', 'CRITICAL'] DEFAULT_LOG_LEVEL = 'INFO' logger = logging.getLogger(__name__) @@ -45,11 +57,11 @@ def get_log_filehandler(): os.makedirs(log_dir) open(log_file_path, 'a').close() except OSError as exception: - print exception + logger.error(exception) return False - print "[INFO] Create log file %s" % log_file_path + logger.info("[INFO] Create log file %s" % log_file_path) # create file handler - fh = logging.FileHandler(log_file_path,'a') + fh = logging.FileHandler(log_file_path, 'a') fh.setLevel(eval('logging.{0}'.format(get_log_level()))) return fh @@ -90,14 +102,15 @@ def _run(cmd, output=False): stdout=subprocess.PIPE, stderr=subprocess.PIPE) except Exception as err: - message = 'Executing command %s failed with error %s' %(cmd, err) + message = 'Executing command %s failed with error %s' % (cmd, err) logger.error(message) return False cmd_output, cmd_error = exec_pipe.communicate() - # VPN commits succeed but we are getting perl locale warnings on stderr + # VPN commits succeed but we are getting perl locale warnings on stderr if exec_pipe.returncode != 0: - message = 'Executing command %s failed with error %s. Output is: %s'%(cmd, cmd_error, cmd_output) + message = ('Executing command %s failed with error %s. 
' + 'Output is: %s' % (cmd, cmd_error, cmd_output)) logger.error(message) return False else: @@ -106,7 +119,7 @@ def _run(cmd, output=False): else: try: logger.debug('exec command: "%s"', cmd) - out = subprocess.check_call(cmd, shell=True) # returns 0 for True + out = subprocess.check_call(cmd, shell=True) # returns 0 for True except subprocess.CalledProcessError as err: logger.error('command execution failed with Error: %s', err) out = 1 # returns 1 for False @@ -118,14 +131,16 @@ def _run(cmd, output=False): # issue :http://vyatta38.rssing.com/chan-10627532/all_p7.html # Not sure if the other commands also may fails or if there is an issue with # the way the config module does things + + def _alternate_set_and_commit(cmd): try: vyos_wrapper = "/opt/vyatta/sbin/vyatta-cfg-cmd-wrapper" - begin_cmd = "%s begin" %(vyos_wrapper) - set_cmd = "%s %s" %(vyos_wrapper, cmd) - commit_cmd = "%s commit" %(vyos_wrapper) + begin_cmd = "%s begin" % (vyos_wrapper) + set_cmd = "%s %s" % (vyos_wrapper, cmd) + commit_cmd = "%s commit" % (vyos_wrapper) save_cmd = "%s save" % (vyos_wrapper) - end_cmd = "%s end" %(vyos_wrapper) + end_cmd = "%s end" % (vyos_wrapper) command = "%s;%s;%s;%s;%s" % (begin_cmd, set_cmd, commit_cmd, save_cmd, end_cmd) logger.debug('exec command: "%s"', command) @@ -133,26 +148,29 @@ def _alternate_set_and_commit(cmd): stdout=subprocess.PIPE, stderr=subprocess.PIPE) except Exception as err: - message = 'Executing command %s failed with error %s' %(command, err) + message = 'Executing command %s failed with error %s' % (command, err) logger.error(message) return False cmd_output, cmd_error = exec_pipe.communicate() # VPN commits succeed but we are getting perl locale warnings on stderr if exec_pipe.returncode != 0: - message = 'Executing command %s failed with error %s' %(command, cmd_error) + message = 'Executing command %s failed with error %s' % ( + command, cmd_error) logger.error(message) return False else: logger.debug('command output: %s', cmd_output) 
return True + def clean_environ(env): """ Delete some envionment variables from system. """ for key in env.keys(): - if os.environ.get('key'): del os.environ[key] + if os.environ.get('key'): + del os.environ[key] def ip2network(ip): diff --git a/gbpservice/nfp/service_vendor_agents/vyos/auth_server.conf b/gbpservice/nfp/service_vendor_agents/vyos/auth_server.conf deleted file mode 100755 index ef768a8728..0000000000 --- a/gbpservice/nfp/service_vendor_agents/vyos/auth_server.conf +++ /dev/null @@ -1,6 +0,0 @@ -http://10.30.120.97:5000/ -services -neutron -noir0123 -vpn -45fe9bb731054eb4acdae8e15d48a562 diff --git a/gbpservice/nfp/service_vendor_agents/vyos/build_vyos_deb.sh b/gbpservice/nfp/service_vendor_agents/vyos/build_vyos_deb.sh new file mode 100644 index 0000000000..a7c35efab8 --- /dev/null +++ b/gbpservice/nfp/service_vendor_agents/vyos/build_vyos_deb.sh @@ -0,0 +1,127 @@ +#! /bin/bash + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +set -e + +SOURCE_CODE_DIR=$1 +DEB_PACKAGE_DIR=$1/deb-packages +version=$2 +release=$3 +DEBIAN_PATH=$DEB_PACKAGE_DIR/vyos-$version-$release + +print_usage () { + + echo "Usage: " + echo " $0 "; + +} + +validate_nob_dev_dir () { + + if [ "x$SOURCE_CODE_DIR" == "x" ]; then + echo "Error: vyos code dir not specified"; + print_usage; + exit 0; + elif [ ! 
-d $SOURCE_CODE_DIR ]; then + echo "Error: $SOURCE_CODE_DIR does not exist"; + print_usage; + exit 0; + fi; +} + +validate_package_version_release () { + + if [ "x$version" == "x" ]; then + echo "Error: Package version not specified"; + print_usage; + exit 0; + elif [ "x$release" == "x" ]; then + echo "Error: Package release not specified"; + print_usage; + exit 0; + fi + +} + +create_deb_package_dir () { + + if [ -d $DEB_PACKAGE_DIR ]; then + : + else + mkdir -p $DEB_PACKAGE_DIR + fi + +} + +create_dir_structure () { + + # creating base directory for package + if [ -d $DEBIAN_PATH ] ; then + rm -rf $DEBIAN_PATH/* + else + mkdir -p $DEBIAN_PATH + fi + + mkdir -p $DEBIAN_PATH/config/auth + mkdir -p $DEBIAN_PATH/usr/bin + mkdir -p $DEBIAN_PATH/usr/share + mkdir -p $DEBIAN_PATH/etc/network/ + mkdir -p $DEBIAN_PATH/config/scripts + mkdir -p $DEBIAN_PATH/etc/dhcp3/dhclient-exit-hooks.d/ +} + + +copy_source_code () { + + commit_id=`git log | head -1` + branch_name=`git rev-parse --abbrev-ref HEAD` + echo "Version: $version-$release" > $DEBIAN_PATH/etc/sc-version + + cp -r $SOURCE_CODE_DIR/DEBIAN $DEBIAN_PATH/. + cp -r $SOURCE_CODE_DIR/etc $DEBIAN_PATH/. + + cp -r $SOURCE_CODE_DIR/bin/vyos $DEBIAN_PATH/usr/bin/. + cp -r $SOURCE_CODE_DIR/src $DEBIAN_PATH/usr/share/vyos + + cp -r $SOURCE_CODE_DIR/src/vyos-pbr/interfaces $DEBIAN_PATH/etc/network/. + cp -r $SOURCE_CODE_DIR/src/vyos-pbr/interface-post-up $DEBIAN_PATH/etc/network/. + cp -r $SOURCE_CODE_DIR/src/vyos-pbr/management_pbr $DEBIAN_PATH/etc/dhcp3/dhclient-exit-hooks.d/. + + # TODO: Do we need this + cp -r $SOURCE_CODE_DIR/src/vyos_init_script/restart_vpn $DEBIAN_PATH/config/scripts/. 
+ mv $DEBIAN_PATH/usr/share/vyos/vyos-pbr $DEBIAN_PATH/usr/share/ + sed -i "s/vyos ([0-9]*.[0-9]*-*[0-9]*)/vyos ($version-$release)/g" $DEBIAN_PATH/DEBIAN/changelog + sed -i "/^Source:/c Source: vyos-$version-$release" $DEBIAN_PATH/DEBIAN/control + sed -i "s/^Version:.*/Version: $version-$release/g" $DEBIAN_PATH/DEBIAN/control +} + +build_deb_package () { + + CURDIR=${PWD} + cd $DEB_PACKAGE_DIR + dpkg-deb --build vyos-$version-$release + cd $CURDIR + + echo "Vyos package will be available in : $DEB_PACKAGE_DIR/vyos-$version-$release.deb " +} + + + +validate_nob_dev_dir +validate_package_version_release +create_deb_package_dir +create_dir_structure +copy_source_code +build_deb_package + diff --git a/gbpservice/nfp/service_vendor_agents/vyos/execformat/formator.py b/gbpservice/nfp/service_vendor_agents/vyos/execformat/formator.py deleted file mode 100755 index 55c41a0518..0000000000 --- a/gbpservice/nfp/service_vendor_agents/vyos/execformat/formator.py +++ /dev/null @@ -1,38 +0,0 @@ -#!/usr/bin/env python -import sys -import os -topdir = os.path.dirname(os.path.realpath(__file__)) + "../.." 
-topdir = os.path.realpath(topdir) -sys.path.insert(0, topdir) -from executor import OperationFailed, execUtils as executor -from vyos_session.utils import logger -from vyosparser import vyos_parser as vparser - -class ServiceError(Exception): pass - -class showConfig(): - def formator(self,options): - args=['show'] - service = options[0] - logger.debug("=====>>>>>> args before executor call = %s"%args) - if service in ['protocols','nat','interfaces','firewall']: - args.extend(options) - elif service in ['dns','dhcp-server','ssh','webproxy']: - options.insert(0,'service') - args.extend(options) - else: - raise ServiceError('unknown such service!') - exe=executor(list(args)) - try: - #if not exe.checkcmd(' '.join(args)): - # logger.error("%s: given args does not match with existing configs!"%args) - # return False - execstate,output=exe.execmd() - logger.debug("=====>>>>>> args after executor call = %s"%args) - except OperationFailed, e: - logger.error(e.message) - return False - if execstate==True: - return vparser.decode_string(output) - - diff --git a/gbpservice/nfp/service_vendor_agents/vyos/init_script/oc-vyos b/gbpservice/nfp/service_vendor_agents/vyos/init_script/oc-vyos deleted file mode 100755 index 850a85e5ed..0000000000 --- a/gbpservice/nfp/service_vendor_agents/vyos/init_script/oc-vyos +++ /dev/null @@ -1,130 +0,0 @@ -#! /bin/sh -# -# One Convergence, Inc. CONFIDENTIAL -# Copyright (c) 2012-2014, One Convergence, Inc., USA -# All Rights Reserved. -# -# All information contained herein is, and remains the property of -# One Convergence, Inc. and its suppliers, if any. The intellectual and -# technical concepts contained herein are proprietary to One Convergence, -# Inc. and its suppliers. 
-# -# Dissemination of this information or reproduction of this material is -# strictly forbidden unless prior written permission is obtained from -# One Convergence, Inc., USA -# -### BEGIN INIT INFO -# Provides: oc-vyos -# Required-Start: $remote_fs $syslog -# Required-Stop: $remote_fs $syslog -# Default-Start: 2 3 4 5 -# Default-Stop: 0 1 6 -# Short-Description: OC vyos service -# Description: Provides the oc-vyos service -### END INIT INFO -set -e -PIDFILE=/var/run/elastic_services/oc-vyos.pid -LOGFILE=/var/log/elastic_services/oc-vyos.log -DAEMON=/usr/bin/oc-vyos -DAEMON_ARGS="--log-file=$LOGFILE" -DAEMON_DIR=/var/run/elastic_services -ENABLED=true -if test -f /etc/default/oc-vyos; then -. /etc/default/oc-vyos -fi -mkdir -p /var/run/elastic_services -mkdir -p /var/log/elastic_services -. /lib/lsb/init-functions -export PATH="${PATH:+$PATH:}/usr/sbin:/sbin" -export TMPDIR=/var/lib/elastic_services/tmp -if [ ! -x ${DAEMON} ] ; then -exit 0 -fi -case "$1" in -start) -test "$ENABLED" = "true" || exit 0 -start=1 -## check if pidfile is there -if [ -f $PIDFILE ]; then -pid=`cat $PIDFILE` -## check if pid is there -if [ "1$pid" -ne "1" ]; then -## check if process with pid not running -set +e -kill -0 $pid > /dev/null 2>&1 -[ $? -eq 0 ] && start=0 -set -e -fi -fi -if [ $start -eq 1 ]; then -## ensure stale processes killed -set +e -running_processes=`ps aux | grep "python /usr/bin/oc-vyos" | grep -v grep | awk '{print $2}' | wc -l` -[ $running_processes -gt 0 ] && ps aux | grep "python /usr/bin/oc-vyos" | grep -v grep | awk '{print $2}' | xargs kill -9 -set -e -log_daemon_msg "Starting oc vyos" -# We have completely messed up the rc level scripts -sudo chown vyos:users -R /var/run/elastic_services -sudo -u vyos start-stop-daemon -Sbmv --pidfile $PIDFILE --chdir $DAEMON_DIR --exec $DAEMON -- $DAEMON_ARGS -log_end_msg $? 
-else -echo "oc-vyos[$pid] is already running" -fi -;; -stop) -test "$ENABLED" = "true" || exit 0 -if [ -f $PIDFILE ]; then -set +e -kill -0 `cat $PIDFILE` > /dev/null 2>&1 -if [ $? -eq 0 ]; then -set -e -log_daemon_msg "Stopping oc vyos" -start-stop-daemon --stop --oknodo --pidfile ${PIDFILE} -log_end_msg $? -else -echo "No process with PID `cat $PIDFILE` found running, removing the PID file" -fi -rm $PIDFILE -else -echo "PID file not existing" -fi -## ensure stale processes killed -set +e -running_processes=`ps aux | grep "python /usr/bin/oc-vyos" | grep -v grep | awk '{print $2}' | wc -l` -[ $running_processes -gt 0 ] && ps aux | grep "python /usr/bin/oc-vyos" | grep -v grep | awk '{print $2}' | xargs kill -9 -set -e -;; -restart|force-reload) -test "$ENABLED" = "true" || exit 1 -$0 stop -sleep 2 -$0 start -;; -reload) -test "$ENABLED" = "true" || exit 0 -## check if pidfile is there -if [ -f $PIDFILE ]; then -set +e -kill -0 `cat $PIDFILE` > /dev/null 2>&1 -if [ $? -eq 0 ]; then -set -e -log_daemon_msg "Reloading oc vyos" -start-stop-daemon --stop --signal 1 --quiet --oknodo --pidfile $PIDFILE -log_end_msg $? -else -echo "No process with PID `cat $PIDFILE` found running, removing the PID file" -fi -else -echo "oc vyos is not running or PID file not existing" -fi -;; -status) -test "$ENABLED" = "true" || exit 0 -status_of_proc -p $PIDFILE $DAEMON oc-vyos && exit 0 || exit $? 
-;; -*) -log_action_msg "Usage: /etc/init.d/oc-vyos {start|stop|restart|force-reload|reload|status}" -exit 1 -;; -esac -exit 0 diff --git a/gbpservice/nfp/service_vendor_agents/vyos/init_script/restart_vpn b/gbpservice/nfp/service_vendor_agents/vyos/init_script/restart_vpn deleted file mode 100755 index 2cb8fe4762..0000000000 --- a/gbpservice/nfp/service_vendor_agents/vyos/init_script/restart_vpn +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/vbash -sudo rm /var/run/pluto.pid -vbash -ic 'restart vpn' diff --git a/gbpservice/nfp/service_vendor_agents/vyos/oc-pbr/README b/gbpservice/nfp/service_vendor_agents/vyos/oc-pbr/README deleted file mode 100644 index 9a29ea0b1f..0000000000 --- a/gbpservice/nfp/service_vendor_agents/vyos/oc-pbr/README +++ /dev/null @@ -1,14 +0,0 @@ -1) Add the following line in /etc/rc.local file as give in sample rc.local file - sudo bash /usr/share/oc-pbr/pbr_init & -2) Modify the interfaces file to looks like given sample interfaces file - -3) mkdir -p /usr/share/oc-pbr - -4) copy pbr_init and pbr to /usr/share/oc-pbr - - -vyos agent (server2.py) adds route to the controller node received from pushed -configuration, and also writes a route in the -/usr/share/oc-pbr/controller_route file. This is to make the added route -persistent even if the instance is rebooted. -controller_route is called from pbr_init file. 
diff --git a/gbpservice/nfp/service_vendor_agents/vyos/oc-pbr/interface-post-up b/gbpservice/nfp/service_vendor_agents/vyos/oc-pbr/interface-post-up deleted file mode 100755 index 56862e7f18..0000000000 --- a/gbpservice/nfp/service_vendor_agents/vyos/oc-pbr/interface-post-up +++ /dev/null @@ -1,2 +0,0 @@ -/sbin/route del default dev $IFACE -echo 1 > /proc/sys/net/ipv4/conf/$IFACE/arp_ignore diff --git a/gbpservice/nfp/service_vendor_agents/vyos/oc-pbr/management_pbr b/gbpservice/nfp/service_vendor_agents/vyos/oc-pbr/management_pbr deleted file mode 100755 index ed5776260b..0000000000 --- a/gbpservice/nfp/service_vendor_agents/vyos/oc-pbr/management_pbr +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash -echo "dhclient: $reason" -case $reason in - BOUND|RENEW|REBIND|REBOOT) - if [ "eth0" == $interface ]; then - sudo bash /usr/share/oc-pbr/pbr_init & - echo "Management pbr is set" - fi - ;; -esac - diff --git a/gbpservice/nfp/service_vendor_agents/vyos/oc-pbr/no-default-route b/gbpservice/nfp/service_vendor_agents/vyos/oc-pbr/no-default-route deleted file mode 100755 index 885672d274..0000000000 --- a/gbpservice/nfp/service_vendor_agents/vyos/oc-pbr/no-default-route +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash -case $reason in - BOUND|RENEW|REBIND|REBOOT) - if [ "eth0" == $interface ]; then - echo $new_routers > /usr/share/oc-pbr/eth0_route - #unset new_routers - else - unset new_routers - fi - echo "Default gateway has been cleared" - ;; -esac diff --git a/gbpservice/nfp/service_vendor_agents/vyos/oc-pbr/pbr b/gbpservice/nfp/service_vendor_agents/vyos/oc-pbr/pbr deleted file mode 100755 index 90e91c1464..0000000000 --- a/gbpservice/nfp/service_vendor_agents/vyos/oc-pbr/pbr +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/vbash -sudo ip route add default via $@ diff --git a/gbpservice/nfp/service_vendor_agents/vyos/oc-vyos.conf b/gbpservice/nfp/service_vendor_agents/vyos/oc-vyos.conf deleted file mode 100644 index 1ae8510480..0000000000 --- 
a/gbpservice/nfp/service_vendor_agents/vyos/oc-vyos.conf +++ /dev/null @@ -1,8 +0,0 @@ -[bin] -vyos_sbin_dir = /opt/vyatta/sbin -shell_api_path = /bin/cli-shell-api - -[log] -logdir=/var/log/oc -logfile=oc-vyos.log -level=ERROR diff --git a/gbpservice/nfp/service_vendor_agents/vyos/oc_config_server/fw_constants.py b/gbpservice/nfp/service_vendor_agents/vyos/oc_config_server/fw_constants.py deleted file mode 100644 index 5e417c91e8..0000000000 --- a/gbpservice/nfp/service_vendor_agents/vyos/oc_config_server/fw_constants.py +++ /dev/null @@ -1,17 +0,0 @@ -# One Convergence, Inc. CONFIDENTIAL -# Copyright (c) 2012-2015, One Convergence, Inc., USA -# All Rights Reserved. -# -# All information contained herein is, and remains the property of -# One Convergence, Inc. and its suppliers, if any. The intellectual and -# technical concepts contained herein are proprietary to One Convergence, -# Inc. and its suppliers. -# -# Dissemination of this information or reproduction of this material is -# strictly forbidden unless prior written permission is obtained from -# One Convergence, Inc., USA - -actions = ["drop", "reject", "accept", "inspect"] -state = ["established", "invalid", "related"] -availability = ["enable", "disable"] -intercloud = False diff --git a/gbpservice/nfp/service_vendor_agents/vyos/oc_config_server/fw_handler.py b/gbpservice/nfp/service_vendor_agents/vyos/oc_config_server/fw_handler.py deleted file mode 100755 index 9ffe9581ab..0000000000 --- a/gbpservice/nfp/service_vendor_agents/vyos/oc_config_server/fw_handler.py +++ /dev/null @@ -1,66 +0,0 @@ -#!/usr/bin/env python - -from operations import configOpts - -FWN = "firewall name" -ZPZ = "zone-policy zone" -class fwHandler(configOpts): - actions=["drop","reject","accept","inspect"] - state=["established","invalid","related"] - availability=["enable","disable"] - - def firewall_config(self,name,suffix): - firewall=[FWN,name,"rule"] - firewall.extend(suffix) - self.set(firewall) - - def 
zone_config(self,suffix): - zone=[ZPZ] - zone.extend(suffix) - self.set(zone) - - def set_zone_desc(self,zone_name,desc): - description = [zone_name,"description",desc] - self.zone_config(description) - - def set_zone_interface(self,zone_name,iface): - interface = [zone_name,"interface",iface] - self.zone_config(interface) - - def setup_fw_on_zone(self,zone_src,zone_dst,firewall): - fw_on_zone=[zone_src,"from",zone_dst,"name",firewall] - self.zone_config(fw_on_zone) - - def set_default_action(self,name,rule_num,action): - if action in self.actions: - self.set_action[rule_num,"action",action] - self.firewall_config(name,set_action) - - def set_rule_state(self,name,rule_num,state,allow): - if state in self.states and allow in self.availability: - self.set_state[rule_num,"state",state,allow] - self.firewall_config(name,set_state) - - def set_protocol(self,name,rule_num,prot): - protocol=[rule_num,"protocol",prot] - self.firewall_config(name,protocol) - - def set_dest_port(self,name,rule_num,portlist,orient="destination"): - port=[rule_num,orient,"port",portlist] - self.firewall_config(name,port) - - - def set_dest_addr(self,name,rule_num,addr_subnet,orient="destination"): - addr=[rule_num,orient,"address",addr_subnet] - self.firewall_config(name,addr) - - def set_src_port(self,name,rule_num,portlist): - self.set_dest_port(name,rule_num,portlist,"source") - - def set_src_addr(self,name,rule_num,addr_subnet): - self.set_dest_addr(name,rule_num,addr_subnet,"source") - - def rule_state(self,name,rule_num,status): - if status in availability: - rule_status=[rule_num,status] - self.firewall_config(name,rule_status) diff --git a/gbpservice/nfp/service_vendor_agents/vyos/oc_config_server/ha_config.py b/gbpservice/nfp/service_vendor_agents/vyos/oc_config_server/ha_config.py deleted file mode 100644 index bd1017c22c..0000000000 --- a/gbpservice/nfp/service_vendor_agents/vyos/oc_config_server/ha_config.py +++ /dev/null @@ -1,367 +0,0 @@ -# One Convergence, Inc. 
CONFIDENTIAL -# Copyright (c) 2012-2015, One Convergence, Inc., USA -# All Rights Reserved. -# -# All information contained herein is, and remains the property of -# One Convergence, Inc. and its suppliers, if any. The intellectual and -# technical concepts contained herein are proprietary to One Convergence, -# Inc. and its suppliers. -# -# Dissemination of this information or reproduction of this material is -# strictly forbidden unless prior written permission is obtained from -# One Convergence, Inc., USA - -#!/usr/bin/env python -import json -import netifaces -import time -import logging -from netifaces import AF_INET, AF_LINK - -from execformat.executor import session - -from operations import configOpts -from vyos_session import utils - -logger = logging.getLogger(__name__) -utils.init_logger(logger) - - -class VYOSHAConfig(configOpts): - """ - Class to configure HA for VYOS. - """ - def __init__(self): - super(VYOSHAConfig, self).__init__() - pass - - def configure_conntrack_sync(self, ha_config): - """ - :param ha_config: - :return: - """ - ha_config = json.loads(ha_config) - monitoring_info, data_info = self.get_conntrack_request_data( - ha_config) - event_queue_size = monitoring_info["event_queue_size"] - cluster_name = monitoring_info["cluster_name"] - mcast_group = monitoring_info["mcast_group"] - sync_queue_size = monitoring_info["sync_queue_size"] - monitoring_mac = monitoring_info["monitoring_mac"] - - monitoring_interface, monitoring_ip = self._get_interface_name( - dict(monitoring_mac=monitoring_mac), - interface_type='monitoring') - - if not monitoring_interface: - # return {'status': 500, - # 'message': 'Failed to get monitoring interface name'} - logger.error("Failed to configure conntrack for CLUSTER- %r" % - cluster_name) - raise Exception("Conntrack sync configuration failed. 
Reason - " - "No monitoring interface information found.", - 400, dict(ha_config=ha_config)) - - conntrack_commands = self._set_conntrack( - cluster_name, event_queue_size, mcast_group, - monitoring_interface, sync_queue_size) - interface_vrrp_commands = self.set_vrrp_for_interface(data_info) - - all_commands = conntrack_commands + interface_vrrp_commands - - self._execute_commands(all_commands, ha_config['tenant_id']) - logger.debug("VRRP configured succesfully - %r " % all_commands) - return {'status': 200, 'message': 'VRRP configured succesfully'} - - def set_interface_ha(self, interface_config): - ha_config = json.loads(interface_config) - try: - cluster_name = ha_config["cluster_name"] - vrrp_group = ha_config["vrrp_group"] - data_macs = ha_config["data_macs"] - preempt_delay = ha_config["preempt_delay"] - priority = ha_config["priority"] - vip = ha_config["vip"] - tenant_id = ha_config["tenant_id"] - advertised_interval = ha_config["advertised_interval"] - except KeyError, err: - raise Exception("HA configuration for interface failed. Value " - "not found. %r" % str(err), - 400, dict(interface_config=ha_config)) - interface_info = dict(vrrp_group=vrrp_group, data_macs=data_macs, - vip=vip, preempt_delay=preempt_delay, - priority=priority, cluster_name=cluster_name, - advertised_interval=advertised_interval, - tenant_id=tenant_id) - - interface_vrrp_commands = self.set_vrrp_for_interface(interface_info) - - self._execute_commands(interface_vrrp_commands, interface_info[ - 'tenant_id']) - logger.debug("VRRP succesfully configured for interfaces.") - return {'status': 200, 'message': 'VRRP succesfully configured for ' - 'interfaces'} - - def delete_vrrp(self, vrrp_config): - """ - :param self: - :param vrrp_config: - :return: - This method makes an assumption that detach of an interface will - finally clean the vrrp entry. That's why doesn't raise any - exception, as was observed that even though it succeeds but raises an - exception. 
Investigation will continue. Also this methods doesn't - clean conntrack explicitly, instead it goes with VM delete. - Exception code will be incorporated once the exception established - case. - """ - vrrp_config = json.loads(vrrp_config) - data_macs = vrrp_config["data_macs"] - # cluster_name = vrrp_config.get("cluster_name", "CLUSTER-1") - - data_interface, data_ip = self._get_interface_name( - dict(data_mac=data_macs['provider_mac']), interface_type='data') - - provider_vrrp_delete = "interfaces ethernet %s vrrp" % data_interface - - data_interface, data_ip = self._get_interface_name( - dict(data_mac=data_macs['stitching_mac']), interface_type='data') - stitching_vrrp_delete = "interfaces ethernet %s vrrp" % data_interface - - session.setup_config_session() - # delete_conntrack_cluster = ("service conntrack-sync " - # "failover-mechanism vrrp sync-group %s" % - # cluster_name) - # try: - # self.delete(group_delete.split()) - # except Exception, err: - # session.discard() - # session.teardown_config_session() - # raise Exception(err) - - try: - self.delete(provider_vrrp_delete.split()) - except Exception, err: - # session.discard() - logger.error("Error deleting provider vrrp %r " % err) - # raise Exception(err) - - try: - self.delete(stitching_vrrp_delete.split()) - except Exception, err: - # session.discard() - logger.error("Error deleting stitching vrrp %r " % err) - # raise Exception(err) - - # try: - # self.delete(delete_conntrack_cluster.split()) - # except Exception, err: - # session.discard() - # session.teardown_config_session() - # raise Exception(err) - # logger.error("Error deleting conntrack - %r " % err) - - session.commit() - time.sleep(5) - session.save() - # REVISIT (VK) This sleep need to get invoked if we see any issue - # with session teardown. 
- # time.sleep(5) - session.teardown_config_session() - logger.debug("VRRP succesfully deleted for interfaces") - return {'status': 200, 'message': 'VRRP succesfully deleted for ' - 'interfaces'} - - def set_vrrp_for_interface(self, data_info): - interface_commands = list() - direct_call = False - if isinstance(data_info, str): - direct_call = True - data_info = json.loads(data_info) - data_macs = data_info.get("data_macs", {}) - vips = data_info.get("vip", {}) - vrrp_groups = data_info["vrrp_group"] - - for mac_type, mac in data_macs.iteritems(): - # mac_type - provider_mac, stitching_mac - data_mac = dict(data_mac=str(mac)) - vip_type = mac_type.split("_")[0] + "_vip" - vip_ip = vips.get(vip_type) - if mac_type == "provider_mac": - vrrp_group = vrrp_groups["provider_vrrp_group"] - if mac_type == "stitching_mac": - vrrp_group = vrrp_groups["stitching_vrrp_group"] - - interface_name, ip = self._get_interface_name( - data_mac, interface_type='data') - - if not interface_name: - logger.error("Failed to configure VRRP, as unable to get " - "interface name.") - raise Exception('VRRP config failed.Failed to get interface' - ' name to configure vrrp', 400, - dict(data_info=data_info)) - - common_command = "interfaces ethernet %s vrrp vrrp-group %s " % ( - interface_name, vrrp_group) - - interface_address_set = "interfaces ethernet %s address %s " % ( - interface_name, ip) - - advt_interval_set = common_command + "advertise-interval %s " % ( - data_info["advertised_interval"]) - - preempt_set = common_command + "preempt true" - # preempt_set = common_command + "preempt %s" % data_info[ - # "preempt"] - preempt_delay_set = common_command + "preempt-delay %s" % \ - data_info["preempt_delay"] - priority_set = common_command + "priority %s" % data_info[ - "priority"] - rfc_set = common_command + "rfc3768-compatibility" - sync_group_set = common_command + "sync-group %s " % data_info[ - "cluster_name"] - virtual_address_set = common_command + "virtual-address %s" % \ - vip_ip 
- - interface_commands += [interface_address_set, advt_interval_set, - preempt_set, preempt_delay_set, - priority_set, rfc_set, sync_group_set, - virtual_address_set] - - logger.debug("Interface commands - %r ", interface_commands) - if not direct_call: - return interface_commands - else: - self._execute_commands(interface_commands, data_info.get( - 'tenant_id')) - return dict(message='Interface configured succesfully') - - @staticmethod - def _set_conntrack(cluster_name, event_queue_size, mcast_group, - monitoring_interface, sync_queue_size): - peer_link_set = "interfaces ethernet %s description PEER-LINK" % \ - monitoring_interface - event_queue_set = "service conntrack-sync event-listen-queue-size " \ - "%s" % str(event_queue_size) - cluster_set = "service conntrack-sync failover-mechanism vrrp " \ - "sync-group " + cluster_name - interface_set = "service conntrack-sync interface %s" % \ - monitoring_interface - mcast_set = "service conntrack-sync mcast-group %s " % mcast_group - sync_queue_set = "service conntrack-sync sync-queue-size %s " % \ - str(sync_queue_size) - commands = [peer_link_set, event_queue_set, cluster_set, - interface_set, mcast_set, sync_queue_set] - - logger.debug("Conntrack commands - %r " % commands) - return commands - - @staticmethod - def _get_interface_name(ha_config, interface_type=None): - """ - :param ha_config: - :param interface_type: - :return: - """ - interfaces = netifaces.interfaces() - for interface in interfaces: - physical_interface = netifaces.ifaddresses(interface).get(AF_LINK) - if not physical_interface: - continue - if AF_INET not in netifaces.ifaddresses(interface).keys(): - continue - mac_addr = netifaces.ifaddresses(interface)[AF_LINK][0]['addr'] - ip_addr = netifaces.ifaddresses(interface)[AF_INET][0]['addr'] - netmask = netifaces.ifaddresses(interface)[AF_INET][0]['netmask'] - if mac_addr == ha_config.get('monitoring_mac', None) and \ - interface_type.lower() == 'monitoring': - return interface, ip_addr - elif 
(mac_addr == ha_config.get('data_mac', None) and - interface_type.lower() == 'data'): - mlen = sum([bin(int(x)).count('1') for x in - netmask.split('.')]) - ip_addr += ("/" + str(mlen)) - return interface, ip_addr - - logger.error("interface name none, ha_config: %s" % ha_config) - return None, None - - def get_conntrack_request_data(self, ha_config): - try: - monitoring_mac = ha_config["monitoring_mac"] - queue_size = ha_config.get("queue_size", 8) - cluster_name = ha_config["cluster_name"] - mcast_group = ha_config["mcast_group"] - sync_queue_size = ha_config.get("sync_queue_size", 1) - vrrp_group = ha_config["vrrp_group"] - data_macs = ha_config["data_macs"] - preempt_delay = ha_config["preempt_delay"] - priority = ha_config["priority"] - vip = ha_config["vip"] - advertised_interval = ha_config["advertised_interval"] - except KeyError, err: - raise Exception("Parameters missing for conntrack configuration " - "%r" % str(err), 400, {"ha_config": ha_config}) - - monitoring_info = dict(monitoring_mac=monitoring_mac, - event_queue_size=queue_size, - cluster_name=cluster_name, - mcast_group=mcast_group, - sync_queue_size=sync_queue_size) - - data_info = dict(vrrp_group=vrrp_group, data_macs=data_macs, - vip=vip, preempt_delay=preempt_delay, - priority=priority, cluster_name=cluster_name, - advertised_interval=advertised_interval) - - return monitoring_info, data_info - - def get_interface_data(self, interface_config): - try: - data_macs = interface_config["data_macs"] - advertised_interval = interface_config["advertised_interval"] - vrrp_group = interface_config["vrrp_group"] - preempt_delay = interface_config["preempt_delay"] - priority = interface_config["priority"] - vip = interface_config["vip"] - except KeyError: - pass - - data_info = dict(data_macs=data_macs, - advertised_interval=advertised_interval, - vrrp_group=vrrp_group, preempt_delay=preempt_delay, - priority=priority, vip=vip) - - return data_info - - def _execute_commands(self, all_commands, 
tenant_id=None): - session.setup_config_session() - for command in all_commands: - try: - self.set(command.split()) - except: - logger.error("Failed to configure HA. Tenant - %r" % tenant_id) - session.teardown_config_session() - raise Exception("Failed to configure HA for tenant %s" % - tenant_id, 400, {"commands": all_commands, - "failed_command": command}) - try: - session.commit() - except: - logger.error("Failed to commit HA configuration. Tenant - %r" - % tenant_id) - session.discard() - time.sleep(2) - session.teardown_config_session() - raise Exception("Failed to configure HA for tenant %s" % tenant_id, - 400, {"commands": all_commands, - "failed_command": command}) - time.sleep(5) - session.save() - time.sleep(5) - session.teardown_config_session() - - - - diff --git a/gbpservice/nfp/service_vendor_agents/vyos/oc_config_server/interface_monitor.sh b/gbpservice/nfp/service_vendor_agents/vyos/oc_config_server/interface_monitor.sh deleted file mode 100755 index f2ebd12247..0000000000 --- a/gbpservice/nfp/service_vendor_agents/vyos/oc_config_server/interface_monitor.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/env bash - - function enumerate_net_interfaces { - - echo `date` `ip addr` >> /var/log/oc/vyos_monitor - echo "\n" - echo `date` `sudo netstat -pantl | grep 8888` >>/var/log/oc/vyos_monitor - } - - enumerate_net_interfaces - diff --git a/gbpservice/nfp/service_vendor_agents/vyos/oc_config_server/log_forwarder.py b/gbpservice/nfp/service_vendor_agents/vyos/oc_config_server/log_forwarder.py deleted file mode 100644 index b40135d752..0000000000 --- a/gbpservice/nfp/service_vendor_agents/vyos/oc_config_server/log_forwarder.py +++ /dev/null @@ -1,55 +0,0 @@ -# One Convergence, Inc. CONFIDENTIAL -# Copyright (c) 2012-2015, One Convergence, Inc., USA -# All Rights Reserved. -# -# All information contained herein is, and remains the property of -# One Convergence, Inc. and its suppliers, if any. 
The intellectual and -# technical concepts contained herein are proprietary to One Convergence, -# Inc. and its suppliers. -# -# Dissemination of this information or reproduction of this material is -# strictly forbidden unless prior written permission is obtained from -# One Convergence, Inc., USA - -import logging -import subprocess - -from vyos_session import utils - -OP_SUCCESS = True -OP_FAILED = False - -logger = logging.getLogger(__name__) -utils.init_logger(logger) - -class APIHandler(object): - def __init__(self): - pass - - def run_command(self, command): - proc = subprocess.Popen(command, - shell=True, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - - out, err = proc.communicate() - if err: - logger.error("Unable to run command %s, ERROR- %s" % - (command, err)) - return None - return out - - def configure_rsyslog_as_client(self, config): - command = """ - /opt/vyatta/sbin/vyatta-cfg-cmd-wrapper begin - /opt/vyatta/sbin/vyatta-cfg-cmd-wrapper set system syslog host %s facility all level %s - /opt/vyatta/sbin/vyatta-cfg-cmd-wrapper commit - /opt/vyatta/sbin/vyatta-cfg-cmd-wrapper save - """ %(config['server_ip'], config['log_level']) - - try: - out = self.run_command(command) - return OP_SUCCESS - except Exception as ex: - logger.error("Error while configuring rsyslog as client. %s" % ex) - return OP_FAILED diff --git a/gbpservice/nfp/service_vendor_agents/vyos/oc_config_server/operations.py b/gbpservice/nfp/service_vendor_agents/vyos/oc_config_server/operations.py deleted file mode 100755 index 849124a9a2..0000000000 --- a/gbpservice/nfp/service_vendor_agents/vyos/oc_config_server/operations.py +++ /dev/null @@ -1,65 +0,0 @@ -# One Convergence, Inc. CONFIDENTIAL -# Copyright (c) 2012-2015, One Convergence, Inc., USA -# All Rights Reserved. -# -# All information contained herein is, and remains the property of -# One Convergence, Inc. and its suppliers, if any. 
The intellectual and -# technical concepts contained herein are proprietary to One Convergence, -# Inc. and its suppliers. -# -# Dissemination of this information or reproduction of this material is -# strictly forbidden unless prior written permission is obtained from -# One Convergence, Inc., USA - -#!/usr/bin/env python - -import sys -import os -import logging -topdir = os.path.dirname(os.path.realpath(__file__)) + "../.." -topdir = os.path.realpath(topdir) -sys.path.insert(0, topdir) -from execformat.executor import execUtils, OperationFailed -from vyos_session import utils - -logger = logging.getLogger(__name__) -utils.init_logger(logger) - - -class configOpts(object): - - def __init__(self): - pass - - def set_1(self, args): - exe=execUtils(list(args)) - exe.execmd() - - def delete_1(self, args): - exe=execUtils(list(args)) - exe.execmd() - - def show(self, args): - exe=execUtils(list(args)) - res,output=exe.execmd(nonsession=True) - return res,output - - def set(self, args): - args.insert(0, 'set') - exe=execUtils(list(args)) - try: - exe.execmd() - return True - except OperationFailed, e: - logger.error(e.message) - return False - - def delete(self, args): - args.insert(0, 'delete') - exe=execUtils(list(args)) - try: - exe.execmd() - return True - except OperationFailed, e: - logger.error(e.message) - return False diff --git a/gbpservice/nfp/service_vendor_agents/vyos/oc_config_server/server.py b/gbpservice/nfp/service_vendor_agents/vyos/oc_config_server/server.py deleted file mode 100644 index d9cd383db8..0000000000 --- a/gbpservice/nfp/service_vendor_agents/vyos/oc_config_server/server.py +++ /dev/null @@ -1,57 +0,0 @@ -# One Convergence, Inc. CONFIDENTIAL -# Copyright (c) 2012-2015, One Convergence, Inc., USA -# All Rights Reserved. -# -# All information contained herein is, and remains the property of -# One Convergence, Inc. and its suppliers, if any. 
The intellectual and -# technical concepts contained herein are proprietary to One Convergence, -# Inc. and its suppliers. -# -# Dissemination of this information or reproduction of this material is -# strictly forbidden unless prior written permission is obtained from -# One Convergence, Inc., USA - -import signal -import logging -import sys -from vyos_session.utils import init_logger - - -logger = logging.getlogger(__name__) -init_logger(logger) - - -class OCVyOSServer(object): - def __init__(self): - pass - - - -def handler(signum, frame): - if signum in [2, 3, 11, 15]: - logger.info(" Recieved signal: %r. Thus exiting " % signum) - sys.exit() - else: - logger.info(" Caught singal: %r. Ignoring " % signum) - - -def main(argv): - vyos_server = OCVyOSServer() - host = '' - port = 0 - if len(argv) != 5: - print "server.py -h -p " - sys.exit(2) - - # Review - OSM: We should accept -h -p in any order. - if argv[1] == '-h': - host = argv[2] - if argv[3] == '-p': - port = int(argv[4]) - signal.signal(signal.SIGTERM, handler) - signal.signal(signal.SIGINT, handler) - vyos_server.start(vyos_server.server, host, port) - - -if __name__ == '__main__': - main(sys.argv) diff --git a/gbpservice/nfp/service_vendor_agents/vyos/oc_config_server/stats_parser.py b/gbpservice/nfp/service_vendor_agents/vyos/oc_config_server/stats_parser.py deleted file mode 100644 index 4272ce4d91..0000000000 --- a/gbpservice/nfp/service_vendor_agents/vyos/oc_config_server/stats_parser.py +++ /dev/null @@ -1,336 +0,0 @@ -# One Convergence, Inc. CONFIDENTIAL -# Copyright (c) 2012-2015, One Convergence, Inc., USA -# All Rights Reserved. -# -# All information contained herein is, and remains the property of -# One Convergence, Inc. and its suppliers, if any. The intellectual and -# technical concepts contained herein are proprietary to One Convergence, -# Inc. and its suppliers. 
-# -# Dissemination of this information or reproduction of this material is -# strictly forbidden unless prior written permission is obtained from -# One Convergence, Inc., USA - -import logging -import subprocess -import netifaces - -from netifaces import AF_LINK -from vyos_session import utils - -logger = logging.getLogger(__name__) -utils.init_logger(logger) - -class APIHandler(object): - def __init__(self): - pass - - def run_command(self, command): - proc = subprocess.Popen(command, - shell=True, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - - out, err = proc.communicate() - if err: - logger.error("Unable to run command %s, ERROR- %s" % - (command, err)) - return None - return out - - def _get_interface_name(self, interface_mac): - interfaces = netifaces.interfaces() - - for interface in interfaces: - try: - mac_addr = netifaces.ifaddresses(interface)[AF_LINK][0]['addr'] - if mac_addr == interface_mac: - return interface - - except KeyError as keyerr: - logger.error('Unable to Parse Stats Data, ' + - 'KeyError: {}'.format(keyerr)) - return None - - def parse_firewall_stats(self, interface, raw_stats): - """ - sample data for command show_firewall_detail.xsl : - - IPv4 Firewall "oc_fw_eth1": - - Active on (eth1,OUT) - - rule action proto packets bytes - ---- ------ ----- ------- ----- - 11 accept tcp 476405 24805598 - condition - saddr 11.0.1.0/24 daddr 11.0.2.0/24 tcp dpt:22 - - 12 accept icmp 1222414 101692572 - condition - saddr 11.0.1.0/24 daddr 11.0.2.0/24 - - 13 drop udp 150770055788 DROP - condition - saddr 11.0.2.0/24 daddr /* - - 14 accept tcp 3589762 238449000 - condition - saddr 11.0.1.0/24 daddr 11.0.2.0/24 tcp dpt:80 - - 10000 drop all 0 0 - condition - saddr 0.0.0.0/0 daddr 0.0.0.0/0 - - """ - firewall = {} - firewalls = [] - firewall_start = False - table = False - status = None - rule_keys = ['rulepriority', 'packets', 'bytes', 'action', - 'source', 'destination'] - - try: - for line in raw_stats.split('\n'): - words = line.split() - if 
'IPv4 Firewall' in line: - firewall_start = True - if 'Active on' in line and interface in line and firewall_start: - status = "Active" - (interface, direction) = words[2][1:-1].split(',') - firewall['interface'] = interface - firewall['dir'] = direction - firewall['rules'] = [] - elif len(words) >= 4: - if words[3] in ['ACCEPT', 'DROP'] and status == "Active": - table = True - rule = dict(zip(rule_keys, words)) - firewall['rules'].append(rule) - elif table and status == "Active": - command = ('/opt/vyatta/bin/vyatta-show-firewall.pl "all_all" ' + - '/opt/vyatta/share/xsl/show_firewall_detail.xsl') - show_fw_data = self.run_command(command) - firewall = self.add_protocol_and_dest_port_info(firewall, show_fw_data) - logger.info("packed firewall \n %s" % firewall) - firewalls.append(firewall) - break - - except KeyError as keyerr: - logger.error('Unable to Parse Firewall Stats Data, ' + - 'KeyError: {}'.format(keyerr)) - - except IndexError as inderr: - logger.error('Unable to Parse Firewall Stats Data, ' + - 'IndexError: {}'.format(inderr)) - - return firewalls - - def add_protocol_and_dest_port_info(self, firewall, show_fw_data): - firewall_started = False - firewall_info_started = False - firewall_matcher = "Active on (" + firewall['interface'] - firewall_info_end = "-------------" - firewall_info = [] - for line in show_fw_data.split('\n'): - if "IPv4 Firewall" in line: - firewall_started = True - if firewall_matcher in line: - firewall_info_started = True - if firewall_started and firewall_info_started: - firewall_info.append(line) - if firewall_started and firewall_info_started and firewall_info_end in line: - break - try: - for rule in firewall.get('rules', []): - for index, stats in enumerate(firewall_info): - if stats is not '': - extract_stats = stats.split() - if rule['rulepriority'] in extract_stats[0]: - rule['protocol'] = extract_stats[2] - for key in firewall_info[index + 1].split(): - if "dpt:" in key: - rule['dest_port'] = key.split(':')[1] - break - 
break - - except KeyError as keyerr: - logger.error('Unable to Parse Firewall Stats Data, ' + - 'KeyError: {}'.format(keyerr)) - - except IndexError as inderr: - logger.error('Unable to Parse Firewall Stats Data, ' + - 'IndexError: {}'.format(inderr)) - - return firewall - - def parse_vpn_s2s(self, raw_stats): - """ - sample data for command show-ipsec-sa-detail : - - Peer IP: 192.168.20.194 - Peer ID: 120.0.0.2 - Local IP: 91.0.0.11 - Local ID: 91.0.0.11 - NAT Traversal: no - NAT Source Port: n/a - NAT Dest Port: n/a - - Tunnel 1: - State: up - Inbound SPI: c6621bd8 - Outbound SPI: cbf2ab18 - Encryption: aes128 - Hash: sha1 - PFS Group: 5 - - Local Net: 90.0.0.0/24 - Local Protocol: all - Local Port: all - - Remote Net: 120.0.0.0/24 - Remote Protocol: all - Remote Port: all - - Inbound Bytes: 654.0 - Outbound Bytes: 504.0 - Active Time (s): 289 - Lifetime (s): 1800 - - """ - s2s_connection = {} - s2s_connections = [] - - try: - for line in raw_stats.split('\n'): - key = '' - value = '' - if ':' in line: - key,value = line.split(":") - - if 'Peer IP' in key: - s2s_connection['peerip'] = value.strip(" \t\n\r") - - elif 'Local IP' in key: - s2s_connection['localip'] = value.strip(" \t\n\r") - - elif "Tunnel" in key: - s2s_connection['tunnels'] = [] - tunnel_info = { 'tunnel' : - key.strip(" \t\n\r").split(" ")[-1] } - - elif "Inbound Bytes" in key: - tunnel_info['in'] = value.strip(" \t\n\r") - - elif "Outbound Bytes" in key: - tunnel_info['out'] = value.strip(" \t\n\r") - s2s_connection['tunnels'].append(tunnel_info) - s2s_connections.append(s2s_connection) - s2s_connection = {} - - except KeyError as keyerr: - logger.error('Unable to Parse IPSec VPN Stats Data, ' + - 'KeyError: {}'.format(keyerr)) - - except IndexError as inderr: - logger.error('Unable to Parse IPSec VPN Stats Data, ' + - 'IndexError: {}'.format(inderr)) - - return s2s_connections - - def parse_vpn_remote(self, raw_stats): - """ - sample data for command vyatta-show-ovpn.pl --mode=server : - - 
OpenVPN server status on vtun0 [] - - Client CN Remote IP Tunnel IP TX byte RX byte Connected Since - --------- --------- --------- ------- ------- --------------- - UNDEF 192.168.2.81 192.168.200.4 8.0K 2.7K Tue Mar 8 09:01:05 2016 - """ - table = False - remote_connection = {} - remote_connections = [] - keys = ['clientCN', 'remoteip', 'tunnelip', 'in', 'out', 'connected_since'] - - try: - for line in raw_stats.split('\n'): - if "Client CN" in line: - table = True - elif len(line.split()) >= 5 and table and "---" not in line: - value_list = line.split()[:-5] - connected_since = " ".join(line.split()[5:]) - clients = filter(lambda value: value.strip(), value_list) - clients.append(connected_since) - remote_connection = dict(zip(keys, clients)) - remote_connections.append(remote_connection) - - except KeyError as keyerr: - logger.error('Unable to Parse Remote VPN Stats Data, ' + - 'KeyError: {}'.format(keyerr)) - - except IndexError as inderr: - logger.error('Unable to Parse Remote VPN Stats Data, ' + - 'IndexError: {}'.format(inderr)) - - return remote_connections - - def get_fw_stats(self, mac_address): - """ - sample data for command show_firewall_statistics.xsl : - - IPv4 Firewall "oc_fw_eth1": - - Active on (eth1,OUT) - - rule packets bytes action source destination - ---- ------- ----- ------ ------ ----------- - 11 476.22K 24.80M ACCEPT 11.0.1.0/24 11.0.2.0/24 - 12 1.22M 101.66M ACCEPT 11.0.1.0/24 11.0.2.0/24 - 13 3.43G 150.73G DROP 11.0.1.0/24 11.0.2.0/24 - 14 3.59M 238.39M ACCEPT 11.0.1.0/24 11.0.2.0/24 - 10000 0 0 DROP 0.0.0.0/0 0.0.0.0/0 - - """ - interface = None - parsed_stats = {} - - command = ('/opt/vyatta/bin/vyatta-show-firewall.pl "all_all" ' + - '/opt/vyatta/share/xsl/show_firewall_statistics.xsl') - - raw_stats = self.run_command(command) - interface = self._get_interface_name(mac_address) - if not interface: - logger.error('No interface available for mac address: %s' % - mac_address) - return parsed_stats - parsed_stats = 
self.parse_firewall_stats(interface, raw_stats) - - logger.info("Firewall stats Data, \n %s" % parsed_stats) - return parsed_stats - - def get_vpn_stats(self): - vpn_parsed_data = {} - command = ('sudo /opt/vyatta/bin/sudo-users/vyatta-op-vpn.pl ' + - '--show-ipsec-sa-detail') - - raw_ipsec_stats = self.run_command(command) - if raw_ipsec_stats: - ipsec_parsed_data = self.parse_vpn_s2s(raw_ipsec_stats) - if ipsec_parsed_data: - vpn_parsed_data['ipsec'] = ipsec_parsed_data - else: - logger.warning("Empty IPSec VPN Stats") - else: - logger.warning("Empty IPSec VPN Stats") - - command = ('sudo /opt/vyatta/bin/sudo-users/vyatta-show-ovpn.pl ' + - '--mode=server') - - raw_remote_stats = self.run_command(command) - if raw_remote_stats: - remote_parsed_data = self.parse_vpn_remote(raw_remote_stats) - if remote_parsed_data: - vpn_parsed_data['remote'] = remote_parsed_data - else: - logger.warning("Empty Remote VPN Stats") - else: - logger.warning("Empty Remote VPN Stats") - - logger.info("VPN stats Data, \n %s" % vpn_parsed_data) - return vpn_parsed_data diff --git a/gbpservice/nfp/service_vendor_agents/vyos/oc_config_server/vyos_dhc.py b/gbpservice/nfp/service_vendor_agents/vyos/oc_config_server/vyos_dhc.py deleted file mode 100644 index 3cf064c37b..0000000000 --- a/gbpservice/nfp/service_vendor_agents/vyos/oc_config_server/vyos_dhc.py +++ /dev/null @@ -1,16 +0,0 @@ -import netifaces -import shlex -import subprocess - - -def initiate_dhclient(): - interfaces = netifaces.interfaces() - for interface in interfaces: - cmd = "sudo dhclient %s" % interface - args = shlex.split(cmd) - if not netifaces.ifaddresses(interface).get(netifaces.AF_INET): - output, error = subprocess.Popen( - args, stdout=subprocess.PIPE, - stderr=subprocess.PIPE).communicate() - if error: - raise diff --git a/gbpservice/nfp/service_vendor_agents/vyos/oc_config_server/vyos_exception.py b/gbpservice/nfp/service_vendor_agents/vyos/oc_config_server/vyos_exception.py deleted file mode 100644 index 
3ffa2486ad..0000000000 --- a/gbpservice/nfp/service_vendor_agents/vyos/oc_config_server/vyos_exception.py +++ /dev/null @@ -1,17 +0,0 @@ -class OCException(Exception): - """ - """ - status_code = 400 - - def __init__(self, message, status_code=None, payload=None): - Exception.__init__(self) - self.message = message - if status_code: - self.status_code = status_code - self.payload = payload - - def to_dict(self): - rv = dict(self.payload or ()) - rv["message"] = self.message - return rv - diff --git a/gbpservice/nfp/service_vendor_agents/vyos/op_commands.sh b/gbpservice/nfp/service_vendor_agents/vyos/op_commands.sh deleted file mode 100755 index a3aab4d1f1..0000000000 --- a/gbpservice/nfp/service_vendor_agents/vyos/op_commands.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/vbash -cmd1="$1" -source /opt/vyatta/etc/functions/script-template -eval "$cmd1" -echo $? -#run show vpn ipsec sa diff --git a/gbpservice/nfp/service_vendor_agents/vyos/vyos_init_script/oc-vyos b/gbpservice/nfp/service_vendor_agents/vyos/vyos_init_script/oc-vyos deleted file mode 100755 index 850a85e5ed..0000000000 --- a/gbpservice/nfp/service_vendor_agents/vyos/vyos_init_script/oc-vyos +++ /dev/null @@ -1,130 +0,0 @@ -#! /bin/sh -# -# One Convergence, Inc. CONFIDENTIAL -# Copyright (c) 2012-2014, One Convergence, Inc., USA -# All Rights Reserved. -# -# All information contained herein is, and remains the property of -# One Convergence, Inc. and its suppliers, if any. The intellectual and -# technical concepts contained herein are proprietary to One Convergence, -# Inc. and its suppliers. 
-# -# Dissemination of this information or reproduction of this material is -# strictly forbidden unless prior written permission is obtained from -# One Convergence, Inc., USA -# -### BEGIN INIT INFO -# Provides: oc-vyos -# Required-Start: $remote_fs $syslog -# Required-Stop: $remote_fs $syslog -# Default-Start: 2 3 4 5 -# Default-Stop: 0 1 6 -# Short-Description: OC vyos service -# Description: Provides the oc-vyos service -### END INIT INFO -set -e -PIDFILE=/var/run/elastic_services/oc-vyos.pid -LOGFILE=/var/log/elastic_services/oc-vyos.log -DAEMON=/usr/bin/oc-vyos -DAEMON_ARGS="--log-file=$LOGFILE" -DAEMON_DIR=/var/run/elastic_services -ENABLED=true -if test -f /etc/default/oc-vyos; then -. /etc/default/oc-vyos -fi -mkdir -p /var/run/elastic_services -mkdir -p /var/log/elastic_services -. /lib/lsb/init-functions -export PATH="${PATH:+$PATH:}/usr/sbin:/sbin" -export TMPDIR=/var/lib/elastic_services/tmp -if [ ! -x ${DAEMON} ] ; then -exit 0 -fi -case "$1" in -start) -test "$ENABLED" = "true" || exit 0 -start=1 -## check if pidfile is there -if [ -f $PIDFILE ]; then -pid=`cat $PIDFILE` -## check if pid is there -if [ "1$pid" -ne "1" ]; then -## check if process with pid not running -set +e -kill -0 $pid > /dev/null 2>&1 -[ $? -eq 0 ] && start=0 -set -e -fi -fi -if [ $start -eq 1 ]; then -## ensure stale processes killed -set +e -running_processes=`ps aux | grep "python /usr/bin/oc-vyos" | grep -v grep | awk '{print $2}' | wc -l` -[ $running_processes -gt 0 ] && ps aux | grep "python /usr/bin/oc-vyos" | grep -v grep | awk '{print $2}' | xargs kill -9 -set -e -log_daemon_msg "Starting oc vyos" -# We have completely messed up the rc level scripts -sudo chown vyos:users -R /var/run/elastic_services -sudo -u vyos start-stop-daemon -Sbmv --pidfile $PIDFILE --chdir $DAEMON_DIR --exec $DAEMON -- $DAEMON_ARGS -log_end_msg $? 
-else -echo "oc-vyos[$pid] is already running" -fi -;; -stop) -test "$ENABLED" = "true" || exit 0 -if [ -f $PIDFILE ]; then -set +e -kill -0 `cat $PIDFILE` > /dev/null 2>&1 -if [ $? -eq 0 ]; then -set -e -log_daemon_msg "Stopping oc vyos" -start-stop-daemon --stop --oknodo --pidfile ${PIDFILE} -log_end_msg $? -else -echo "No process with PID `cat $PIDFILE` found running, removing the PID file" -fi -rm $PIDFILE -else -echo "PID file not existing" -fi -## ensure stale processes killed -set +e -running_processes=`ps aux | grep "python /usr/bin/oc-vyos" | grep -v grep | awk '{print $2}' | wc -l` -[ $running_processes -gt 0 ] && ps aux | grep "python /usr/bin/oc-vyos" | grep -v grep | awk '{print $2}' | xargs kill -9 -set -e -;; -restart|force-reload) -test "$ENABLED" = "true" || exit 1 -$0 stop -sleep 2 -$0 start -;; -reload) -test "$ENABLED" = "true" || exit 0 -## check if pidfile is there -if [ -f $PIDFILE ]; then -set +e -kill -0 `cat $PIDFILE` > /dev/null 2>&1 -if [ $? -eq 0 ]; then -set -e -log_daemon_msg "Reloading oc vyos" -start-stop-daemon --stop --signal 1 --quiet --oknodo --pidfile $PIDFILE -log_end_msg $? -else -echo "No process with PID `cat $PIDFILE` found running, removing the PID file" -fi -else -echo "oc vyos is not running or PID file not existing" -fi -;; -status) -test "$ENABLED" = "true" || exit 0 -status_of_proc -p $PIDFILE $DAEMON oc-vyos && exit 0 || exit $? 
-;; -*) -log_action_msg "Usage: /etc/init.d/oc-vyos {start|stop|restart|force-reload|reload|status}" -exit 1 -;; -esac -exit 0 diff --git a/gbpservice/nfp/service_vendor_agents/vyos/vyos_init_script/restart_vpn b/gbpservice/nfp/service_vendor_agents/vyos/vyos_init_script/restart_vpn deleted file mode 100644 index 2cb8fe4762..0000000000 --- a/gbpservice/nfp/service_vendor_agents/vyos/vyos_init_script/restart_vpn +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/vbash -sudo rm /var/run/pluto.pid -vbash -ic 'restart vpn' diff --git a/gbpservice/tests/contrib/devstack/exercises-nfp/fw.sh b/gbpservice/tests/contrib/devstack/exercises-nfp/fw.sh new file mode 100755 index 0000000000..b1aa6565e1 --- /dev/null +++ b/gbpservice/tests/contrib/devstack/exercises-nfp/fw.sh @@ -0,0 +1,138 @@ +#!/usr/bin/env bash + +# **fw.sh** + +# Sanity check that firewall service is created with NFP + +echo "*********************************************************************" +echo "Begin NFP Exercise: $0" +echo "*********************************************************************" + +# Settings +# ======== + +# This script exits on an error so that errors don't compound and you see +# only the first error that occurred. 
+set -o errexit + +# Keep track of the current directory +EXERCISE_DIR=$(cd $(dirname "$0") && pwd) +TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) + +source $TOP_DIR/openrc neutron service + +create_gbp_resources() { + gbp servicechain-node-create --service-profile base_mode_fw --template-file $TOP_DIR/nfp-templates/fw_template.yml FWNODE + gbp servicechain-spec-create --nodes "FWNODE" fw-chainspec + gbp policy-action-create --action-type REDIRECT --action-value fw-chainspec redirect-to-fw + gbp policy-action-create --action-type ALLOW allow-to-fw + gbp policy-classifier-create --protocol tcp --direction bi fw-web-classifier-tcp + gbp policy-classifier-create --protocol udp --direction bi fw-web-classifier-udp + gbp policy-classifier-create --protocol icmp --direction bi fw-web-classifier-icmp + gbp policy-rule-create --classifier fw-web-classifier-tcp --actions redirect-to-fw fw-web-redirect-rule + gbp policy-rule-create --classifier fw-web-classifier-tcp --actions allow-to-fw fw-web-allow-rule-tcp + gbp policy-rule-create --classifier fw-web-classifier-udp --actions allow-to-fw fw-web-allow-rule-udp + gbp policy-rule-create --classifier fw-web-classifier-icmp --actions allow-to-fw fw-web-allow-rule-icmp + gbp policy-rule-set-create --policy-rules "fw-web-redirect-rule fw-web-allow-rule-tcp fw-web-allow-rule-udp fw-web-allow-rule-icmp" fw-webredirect-ruleset + gbp group-create fw-consumer --consumed-policy-rule-sets "fw-webredirect-ruleset=None" + gbp group-create fw-provider --provided-policy-rule-sets "fw-webredirect-ruleset=None" +} + +delete_gbp_resources() { + gbp group-delete fw-provider + gbp group-delete fw-consumer + gbp policy-rule-set-delete fw-webredirect-ruleset + gbp policy-rule-delete fw-web-redirect-rule + gbp policy-rule-delete fw-web-allow-rule-tcp + gbp policy-rule-delete fw-web-allow-rule-icmp + gbp policy-rule-delete fw-web-allow-rule-udp + gbp policy-classifier-delete fw-web-classifier-tcp + gbp policy-classifier-delete fw-web-classifier-icmp + gbp 
policy-classifier-delete fw-web-classifier-udp + gbp policy-action-delete redirect-to-fw + gbp policy-action-delete allow-to-fw + gbp servicechain-spec-delete fw-chainspec + gbp servicechain-node-delete FWNODE +} + +validate_gbp_resources() { + ServiceChainInstanceCount=`gbp sci-list -f value | grep fw-provider | wc -l` + if [ "$ServiceChainInstanceCount" -eq "1" ]; then + echo "Chain creation Succeded" + else + echo "Chain creation failed" + fi +} + +validate_firewall_resources() { + FirewallRuleCount=`neutron firewall-rule-list -f value | grep Rule | wc -l` + if [ "$FirewallRuleCount" -eq "4" ]; then + echo "Firewall Rule resource created" + else + echo "Firewall Rule resource not created" + fi + + FirewallPolicyCount=`neutron firewall-policy-list -f value | grep fw | wc -l` + if [ "$FirewallPolicyCount" -eq "1" ]; then + echo "Firewall Policy resource created" + else + echo "Firewall Policy resource not created" + fi + + FirewallCount=`neutron firewall-list -f value | wc -l` + if [ "$FirewallCount" -eq "1" ]; then + echo "Firewall resource created" + FirewallUUID=`neutron firewall-list -f value | awk '{print $1}'` + FirewallStatus=`neutron firewall-show $FirewallUUID -f value -c status` + echo "Firewall resource is in $FirewallStatus state" + else + echo "Firewall resource not created" + fi +} + +update_gbp_resources() { + # Update existing chain, by removing 2 rules + #gbp servicechain-node-update FWNODE --template-file $TOP_DIR/nfp-templates/fw_updated_template.yml + + #FirewallRuleCount=`neutron firewall-rule-list -f value | wc -l` + #if [ "$FirewallRuleCount" -eq "2" ]; then + # echo "Chain created" + #else + # echo "Chain not created" + #fi + + gbp group-delete fw-provider + gbp group-delete fw-consumer + ServiceChainInstanceCount=`gbp sci-list -f value | grep fw-provider | wc -l` + if [ "$ServiceChainInstanceCount" -eq "0" ]; then + echo "Chain deleted" + else + echo "Chain not deleted" + fi + + # Service chain creation/deletion through PRS update + gbp 
group-create fw-consumer --consumed-policy-rule-sets "fw-webredirect-ruleset=None" + gbp group-create fw-provider + ServiceChainInstanceCount=`gbp sci-list -f value | grep fw-provider | wc -l` + if [ "$ServiceChainInstanceCount" -eq "0" ]; then + echo "Chain not created" + else + echo "Chain not deleted" + fi + + gbp group-update fw-provider --provided-policy-rule-sets "fw-webredirect-ruleset=None" + ServiceChainInstanceCount=`gbp sci-list -f value | grep fw-provider | wc -l` + if [ "$ServiceChainInstanceCount" -eq "1" ]; then + echo "Chain created" + else + echo "Chain not created" + fi +} + +create_gbp_resources +validate_gbp_resources +validate_firewall_resources + +update_gbp_resources + +delete_gbp_resources diff --git a/gbpservice/tests/contrib/devstack/exercises-nfp/fw_lb.sh b/gbpservice/tests/contrib/devstack/exercises-nfp/fw_lb.sh new file mode 100755 index 0000000000..397f80a5c0 --- /dev/null +++ b/gbpservice/tests/contrib/devstack/exercises-nfp/fw_lb.sh @@ -0,0 +1,204 @@ +#!/usr/bin/env bash + +# **fw_lb.sh** + +# Sanity check that firewall and loadbalancer service chain is created with NFP + +echo "*********************************************************************" +echo "Begin NFP Exercise: $0" +echo "*********************************************************************" + +# Settings +# ======== + +# This script exits on an error so that errors don't compound and you see +# only the first error that occurred. 
+set -o errexit + +# Keep track of the current directory +EXERCISE_DIR=$(cd $(dirname "$0") && pwd) +TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) + +source $TOP_DIR/openrc neutron service + +create_gbp_resources() { + # E-W insertion + gbp servicechain-node-create --service-profile base_mode_fw --template-file $TOP_DIR/nfp-templates/fw_template.yml FW_LB-FWNODE + gbp servicechain-node-create --service-profile base_mode_lb --template-file $TOP_DIR/nfp-templates/haproxy.template FW_LB-LBNODE + gbp servicechain-spec-create --nodes "FW_LB-FWNODE FW_LB-LBNODE" fw_lb_chainspec + gbp policy-action-create --action-type REDIRECT --action-value fw_lb_chainspec redirect-to-fw_lb + gbp policy-classifier-create --protocol tcp --direction bi fw_lb-webredirect + gbp policy-rule-create --classifier fw_lb-webredirect --actions redirect-to-fw_lb fw_lb-web-redirect-rule + gbp policy-rule-set-create --policy-rules "fw_lb-web-redirect-rule" fw_lb-webredirect-ruleset + gbp network-service-policy-create --network-service-params type=ip_single,name=vip_ip,value=self_subnet fw_lb_nsp + gbp group-create fw_lb-consumer --consumed-policy-rule-sets "fw_lb-webredirect-ruleset=None" + gbp group-create fw_lb-provider --provided-policy-rule-sets "fw_lb-webredirect-ruleset=None" --network-service-policy fw_lb_nsp +} + +delete_gbp_resources() { + gbp group-delete fw_lb-provider + gbp group-delete fw_lb-consumer + gbp network-service-policy-delete fw_lb_nsp + gbp policy-rule-set-delete fw_lb-webredirect-ruleset + gbp policy-rule-delete fw_lb-web-redirect-rule + gbp policy-classifier-delete fw_lb-webredirect + gbp policy-action-delete redirect-to-fw_lb + gbp servicechain-spec-delete fw_lb_chainspec + gbp servicechain-node-delete FW_LB-LBNODE + gbp servicechain-node-delete FW_LB-FWNODE +} + +validate_gbp_resources() { + ServiceChainInstanceCount=`gbp sci-list -f value | grep fw_lb-provider | wc -l` + if [ "$ServiceChainInstanceCount" -eq "1" ]; then + echo "Chain creation Succeded" + else + echo "Chain creation 
failed" + fi + + ServiceChainNodeCount=`gbp scn-list -f value | grep FW_LB | wc -l` + if [ "$ServiceChainNodeCount" -eq "2" ]; then + echo "Network function creation Succeded" + else + echo "Network function creation failed" + fi +} + +validate_firewall_resources() { + FirewallRuleCount=`neutron firewall-rule-list -f value | grep Rule | wc -l` + if [ "$FirewallRuleCount" -eq "4" ]; then + echo "Firewall Rule resource created" + else + echo "Firewall Rule resource not created" + fi + + FirewallPolicyCount=`neutron firewall-policy-list -f value | grep fw | wc -l` + if [ "$FirewallPolicyCount" -eq "1" ]; then + echo "Firewall Policy resource created" + else + echo "Firewall Policy resource not created" + fi + + FirewallCount=`neutron firewall-list -f value | wc -l` + if [ "$FirewallCount" -eq "1" ]; then + echo "Firewall resource created" + FirewallUUID=`neutron firewall-list -f value | awk '{print $1}'` + FirewallStatus=`neutron firewall-show $FirewallUUID -f value -c status` + echo "Firewall resource is in $FirewallStatus state" + else + echo "Firewall resource not created" + fi +} + +validate_loadbalancer_resources() { + LBPoolCount=`neutron lb-pool-list -f value | wc -l` + if [ "$LBPoolCount" -eq "1" ]; then + echo "LB Pool resource created" + LBPoolUUID=`neutron lb-pool-list -f value | awk '{print $1}'` + LBPoolStatus=`neutron lb-pool-show $LBPoolUUID -f value -c status` + echo "LB Pool resource is in $LBPoolStatus state" + else + echo "LB Pool resource not created" + fi + + LBVIPCount=`neutron lb-vip-list -f value | wc -l` + if [ "$LBVIPCount" -eq "1" ]; then + echo "LB VIP resource created" + LBVIPUUID=`neutron lb-vip-list -f value | awk '{print $1}'` + LBVIPStatus=`neutron lb-vip-show $LBVIPUUID -f value -c status` + echo "LB VIP resource is in $LBVIPStatus state" + else + echo "LB VIP resource not created" + fi + + LBHMCount=`neutron lb-healthmonitor-list -f value | wc -l` + if [ "$LBHMCount" -eq "1" ]; then + echo "LB Healthmonitor resource created" + else + 
echo "LB Healthmonitor resource not created" + fi + + gbp policy-target-create --policy-target-group fw_lb-provider provider_pt1 + sleep 5 + LBMemberCount=`neutron lb-member-list -f value | wc -l` + if [ "$LBMemberCount" -eq "1" ]; then + echo "LB Member resource created" + else + echo "LB Member resource not created" + fi + + gbp policy-target-create --policy-target-group fw_lb-provider provider_pt2 + sleep 5 + LBMemberCount=`neutron lb-member-list -f value | wc -l` + if [ "$LBMemberCount" -eq "2" ]; then + echo "LB Member resource created" + else + echo "LB Member resource not created" + fi + + gbp policy-target-delete provider_pt1 + sleep 5 + LBMemberCount=`neutron lb-member-list -f value | wc -l` + if [ "$LBMemberCount" -eq "1" ]; then + echo "LB Member resource deleted" + else + echo "LB Member resource not deleted" + fi + + gbp policy-target-delete provider_pt2 + sleep 5 + LBMemberCount=`neutron lb-member-list -f value | wc -l` + if [ "$LBMemberCount" -eq "0" ]; then + echo "LB Member resource deleted" + else + echo "LB Member resource not deleted" + fi +} + +update_gbp_resources() { + # Update existing chain, by removing 2 rules + #gbp servicechain-node-update FW_LB-FWNODE --template-file $TOP_DIR/nfp-templates/fw_updated_template.yml + + #FirewallRuleCount=`neutron firewall-rule-list -f value | wc -l` + #if [ "$FirewallRuleCount" -eq "2" ]; then + # echo "Chain created" + #else + # echo "Chain not created" + #fi + + gbp group-delete fw_lb-provider + gbp group-delete fw_lb-consumer + ServiceChainInstanceCount=`gbp sci-list -f value | grep fw_lb-provider | wc -l` + if [ "$ServiceChainInstanceCount" -eq "0" ]; then + echo "Chain deleted" + else + echo "Chain not deleted" + fi + + # Service chain creation/deletion through PRS update + gbp group-create fw_lb-consumer --consumed-policy-rule-sets "fw_lb-webredirect-ruleset=None" + gbp group-create fw_lb-provider + ServiceChainInstanceCount=`gbp sci-list -f value | grep fw_lb-provider | wc -l` + if [ 
"$ServiceChainInstanceCount" -eq "0" ]; then + echo "Chain not created" + else + echo "Chain not deleted" + fi + + gbp group-update fw_lb-provider --provided-policy-rule-sets "fw_lb-webredirect-ruleset=None" --network-service-policy fw_lb_nsp + ServiceChainInstanceCount=`gbp sci-list -f value | grep fw_lb-provider | wc -l` + if [ "$ServiceChainInstanceCount" -eq "1" ]; then + echo "Chain created" + else + echo "Chain not created" + fi +} + +create_gbp_resources +validate_gbp_resources +validate_firewall_resources +validate_loadbalancer_resources + +update_gbp_resources + +delete_gbp_resources diff --git a/gbpservice/tests/contrib/devstack/exercises-nfp/fw_vm.sh b/gbpservice/tests/contrib/devstack/exercises-nfp/fw_vm.sh new file mode 100755 index 0000000000..8d294fee1c --- /dev/null +++ b/gbpservice/tests/contrib/devstack/exercises-nfp/fw_vm.sh @@ -0,0 +1,138 @@ +#!/usr/bin/env bash + +# **fw_vm.sh** + +# Sanity check that firewall(in service VM) service is created with NFP + +echo "*********************************************************************" +echo "Begin NFP Exercise: $0" +echo "*********************************************************************" + +# Settings +# ======== + +# This script exits on an error so that errors don't compound and you see +# only the first error that occurred. 
+set -o errexit + +# Keep track of the current directory +EXERCISE_DIR=$(cd $(dirname "$0") && pwd) +TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) + +source $TOP_DIR/openrc neutron service + +create_gbp_resources() { + gbp servicechain-node-create --service-profile base_mode_fw_vm --config 'custom_json:{"mimetype": "config/custom+json","rules": [{"action": "log", "name": "tcp", "service": "tcp/80"}, {"action": "log", "name": "tcp", "service": "tcp/8080"}, {"action": "accept", "name": "tcp", "service": "tcp/22"}, {"action": "accept", "name": "icmp", "service": "icmp"}]}' FWNODE + gbp servicechain-spec-create --nodes "FWNODE" fw-chainspec + gbp policy-action-create --action-type REDIRECT --action-value fw-chainspec redirect-to-fw + gbp policy-action-create --action-type ALLOW allow-to-fw + gbp policy-classifier-create --protocol tcp --direction bi fw-web-classifier-tcp + gbp policy-classifier-create --protocol udp --direction bi fw-web-classifier-udp + gbp policy-classifier-create --protocol icmp --direction bi fw-web-classifier-icmp + gbp policy-rule-create --classifier fw-web-classifier-tcp --actions redirect-to-fw fw-web-redirect-rule + gbp policy-rule-create --classifier fw-web-classifier-tcp --actions allow-to-fw fw-web-allow-rule-tcp + gbp policy-rule-create --classifier fw-web-classifier-udp --actions allow-to-fw fw-web-allow-rule-udp + gbp policy-rule-create --classifier fw-web-classifier-icmp --actions allow-to-fw fw-web-allow-rule-icmp + gbp policy-rule-set-create --policy-rules "fw-web-redirect-rule fw-web-allow-rule-tcp fw-web-allow-rule-udp fw-web-allow-rule-icmp" fw-webredirect-ruleset + gbp group-create fw-consumer --consumed-policy-rule-sets "fw-webredirect-ruleset=None" + gbp group-create fw-provider --provided-policy-rule-sets "fw-webredirect-ruleset=None" +} + +delete_gbp_resources() { + gbp group-delete fw-provider + gbp group-delete fw-consumer + gbp policy-rule-set-delete fw-webredirect-ruleset + gbp policy-rule-delete fw-web-redirect-rule + gbp 
policy-rule-delete fw-web-allow-rule-tcp + gbp policy-rule-delete fw-web-allow-rule-icmp + gbp policy-rule-delete fw-web-allow-rule-udp + gbp policy-classifier-delete fw-web-classifier-tcp + gbp policy-classifier-delete fw-web-classifier-icmp + gbp policy-classifier-delete fw-web-classifier-udp + gbp policy-action-delete redirect-to-fw + gbp policy-action-delete allow-to-fw + gbp servicechain-spec-delete fw-chainspec + gbp servicechain-node-delete FWNODE +} + +validate_gbp_resources() { + ServiceChainInstanceCount=`gbp sci-list -f value | grep fw-provider | wc -l` + if [ "$ServiceChainInstanceCount" -eq "1" ]; then + echo "Chain creation Succeded" + else + echo "Chain creation failed" + fi +} + +validate_firewall_resources() { + FirewallRuleCount=`neutron firewall-rule-list -f value | grep Rule | wc -l` + if [ "$FirewallRuleCount" -eq "4" ]; then + echo "Firewall Rule resource created" + else + echo "Firewall Rule resource not created" + fi + + FirewallPolicyCount=`neutron firewall-policy-list -f value | grep fw | wc -l` + if [ "$FirewallPolicyCount" -eq "1" ]; then + echo "Firewall Policy resource created" + else + echo "Firewall Policy resource not created" + fi + + FirewallCount=`neutron firewall-list -f value | wc -l` + if [ "$FirewallCount" -eq "1" ]; then + echo "Firewall resource created" + FirewallUUID=`neutron firewall-list -f value | awk '{print $1}'` + FirewallStatus=`neutron firewall-show $FirewallUUID -f value -c status` + echo "Firewall resource is in $FirewallStatus state" + else + echo "Firewall resource not created" + fi +} + +update_gbp_resources() { + # Update existing chain, by removing 2 rules + #gbp servicechain-node-update FWNODE --template-file $TOP_DIR/nfp-templates/fw_updated_template.yml + + #FirewallRuleCount=`neutron firewall-rule-list -f value | wc -l` + #if [ "$FirewallRuleCount" -eq "2" ]; then + # echo "Chain created" + #else + # echo "Chain not created" + #fi + + gbp group-delete fw-provider + gbp group-delete fw-consumer + 
ServiceChainInstanceCount=`gbp sci-list -f value | grep fw-provider | wc -l` + if [ "$ServiceChainInstanceCount" -eq "0" ]; then + echo "Chain deleted" + else + echo "Chain not deleted" + fi + + # Service chain creation/deletion through PRS update + gbp group-create fw-consumer --consumed-policy-rule-sets "fw-webredirect-ruleset=None" + gbp group-create fw-provider + ServiceChainInstanceCount=`gbp sci-list -f value | grep fw-provider | wc -l` + if [ "$ServiceChainInstanceCount" -eq "0" ]; then + echo "Chain not created" + else + echo "Chain not deleted" + fi + + gbp group-update fw-provider --provided-policy-rule-sets "fw-webredirect-ruleset=None" + ServiceChainInstanceCount=`gbp sci-list -f value | grep fw-provider | wc -l` + if [ "$ServiceChainInstanceCount" -eq "1" ]; then + echo "Chain created" + else + echo "Chain not created" + fi +} + +create_gbp_resources +validate_gbp_resources +validate_firewall_resources + +update_gbp_resources + +delete_gbp_resources diff --git a/gbpservice/tests/contrib/devstack/exercises-nfp/fw_vm_lb.sh b/gbpservice/tests/contrib/devstack/exercises-nfp/fw_vm_lb.sh new file mode 100755 index 0000000000..0614f4c798 --- /dev/null +++ b/gbpservice/tests/contrib/devstack/exercises-nfp/fw_vm_lb.sh @@ -0,0 +1,204 @@ +#!/usr/bin/env bash + +# **fw_vm_lb.sh** + +# Sanity check that firewall(in service VM) and loadbalancer service chain is created with NFP + +echo "*********************************************************************" +echo "Begin NFP Exercise: $0" +echo "*********************************************************************" + +# Settings +# ======== + +# This script exits on an error so that errors don't compound and you see +# only the first error that occurred. 
+set -o errexit + +# Keep track of the current directory +EXERCISE_DIR=$(cd $(dirname "$0") && pwd) +TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) + +source $TOP_DIR/openrc neutron service + +create_gbp_resources() { + # E-W insertion + gbp servicechain-node-create --service-profile base_mode_fw_vm --config 'custom_json:{"mimetype": "config/custom+json","rules": [{"action": "log", "name": "tcp", "service": "tcp/80"}, {"action": "log", "name": "tcp", "service": "tcp/8080"}, {"action": "accept", "name": "tcp", "service": "tcp/22"}, {"action": "accept", "name": "icmp", "service": "icmp"}]}' FW_LB-FWNODE + gbp servicechain-node-create --service-profile base_mode_lb --template-file $TOP_DIR/nfp-templates/haproxy.template FW_LB-LBNODE + gbp servicechain-spec-create --nodes "FW_LB-FWNODE FW_LB-LBNODE" fw_lb_chainspec + gbp policy-action-create --action-type REDIRECT --action-value fw_lb_chainspec redirect-to-fw_lb + gbp policy-classifier-create --protocol tcp --direction bi fw_lb-webredirect + gbp policy-rule-create --classifier fw_lb-webredirect --actions redirect-to-fw_lb fw_lb-web-redirect-rule + gbp policy-rule-set-create --policy-rules "fw_lb-web-redirect-rule" fw_lb-webredirect-ruleset + gbp network-service-policy-create --network-service-params type=ip_single,name=vip_ip,value=self_subnet fw_lb_nsp + gbp group-create fw_lb-consumer --consumed-policy-rule-sets "fw_lb-webredirect-ruleset=None" + gbp group-create fw_lb-provider --provided-policy-rule-sets "fw_lb-webredirect-ruleset=None" --network-service-policy fw_lb_nsp +} + +delete_gbp_resources() { + gbp group-delete fw_lb-provider + gbp group-delete fw_lb-consumer + gbp network-service-policy-delete fw_lb_nsp + gbp policy-rule-set-delete fw_lb-webredirect-ruleset + gbp policy-rule-delete fw_lb-web-redirect-rule + gbp policy-classifier-delete fw_lb-webredirect + gbp policy-action-delete redirect-to-fw_lb + gbp servicechain-spec-delete fw_lb_chainspec + gbp servicechain-node-delete FW_LB-LBNODE + gbp servicechain-node-delete 
FW_LB-FWNODE +} + +validate_gbp_resources() { + ServiceChainInstanceCount=`gbp sci-list -f value | grep fw_lb-provider | wc -l` + if [ "$ServiceChainInstanceCount" -eq "1" ]; then + echo "Chain creation Succeded" + else + echo "Chain creation failed" + fi + + ServiceChainNodeCount=`gbp scn-list -f value | grep FW_LB | wc -l` + if [ "$ServiceChainNodeCount" -eq "2" ]; then + echo "Network function creation Succeded" + else + echo "Network function creation failed" + fi +} + +validate_firewall_resources() { + FirewallRuleCount=`neutron firewall-rule-list -f value | grep Rule | wc -l` + if [ "$FirewallRuleCount" -eq "4" ]; then + echo "Firewall Rule resource created" + else + echo "Firewall Rule resource not created" + fi + + FirewallPolicyCount=`neutron firewall-policy-list -f value | grep fw | wc -l` + if [ "$FirewallPolicyCount" -eq "1" ]; then + echo "Firewall Policy resource created" + else + echo "Firewall Policy resource not created" + fi + + FirewallCount=`neutron firewall-list -f value | wc -l` + if [ "$FirewallCount" -eq "1" ]; then + echo "Firewall resource created" + FirewallUUID=`neutron firewall-list -f value | awk '{print $1}'` + FirewallStatus=`neutron firewall-show $FirewallUUID -f value -c status` + echo "Firewall resource is in $FirewallStatus state" + else + echo "Firewall resource not created" + fi +} + +validate_loadbalancer_resources() { + LBPoolCount=`neutron lb-pool-list -f value | wc -l` + if [ "$LBPoolCount" -eq "1" ]; then + echo "LB Pool resource created" + LBPoolUUID=`neutron lb-pool-list -f value | awk '{print $1}'` + LBPoolStatus=`neutron lb-pool-show $LBPoolUUID -f value -c status` + echo "LB Pool resource is in $LBPoolStatus state" + else + echo "LB Pool resource not created" + fi + + LBVIPCount=`neutron lb-vip-list -f value | wc -l` + if [ "$LBVIPCount" -eq "1" ]; then + echo "LB VIP resource created" + LBVIPUUID=`neutron lb-vip-list -f value | awk '{print $1}'` + LBVIPStatus=`neutron lb-vip-show $LBVIPUUID -f value -c status` + echo 
"LB VIP resource is in $LBVIPStatus state" + else + echo "LB VIP resource not created" + fi + + LBHMCount=`neutron lb-healthmonitor-list -f value | wc -l` + if [ "$LBHMCount" -eq "1" ]; then + echo "LB Healthmonitor resource created" + else + echo "LB Healthmonitor resource not created" + fi + + gbp policy-target-create --policy-target-group fw_lb-provider provider_pt1 + sleep 5 + LBMemberCount=`neutron lb-member-list -f value | wc -l` + if [ "$LBMemberCount" -eq "1" ]; then + echo "LB Member resource created" + else + echo "LB Member resource not created" + fi + + gbp policy-target-create --policy-target-group fw_lb-provider provider_pt2 + sleep 5 + LBMemberCount=`neutron lb-member-list -f value | wc -l` + if [ "$LBMemberCount" -eq "2" ]; then + echo "LB Member resource created" + else + echo "LB Member resource not created" + fi + + gbp policy-target-delete provider_pt1 + sleep 5 + LBMemberCount=`neutron lb-member-list -f value | wc -l` + if [ "$LBMemberCount" -eq "1" ]; then + echo "LB Member resource deleted" + else + echo "LB Member resource not deleted" + fi + + gbp policy-target-delete provider_pt2 + sleep 5 + LBMemberCount=`neutron lb-member-list -f value | wc -l` + if [ "$LBMemberCount" -eq "0" ]; then + echo "LB Member resource deleted" + else + echo "LB Member resource not deleted" + fi +} + +update_gbp_resources() { + # Update existing chain, by removing 2 rules + #gbp servicechain-node-update FW_LB-FWNODE --template-file $TOP_DIR/nfp-templates/fw_updated_template.yml + + #FirewallRuleCount=`neutron firewall-rule-list -f value | wc -l` + #if [ "$FirewallRuleCount" -eq "2" ]; then + # echo "Chain created" + #else + # echo "Chain not created" + #fi + + gbp group-delete fw_lb-provider + gbp group-delete fw_lb-consumer + ServiceChainInstanceCount=`gbp sci-list -f value | grep fw_lb-provider | wc -l` + if [ "$ServiceChainInstanceCount" -eq "0" ]; then + echo "Chain deleted" + else + echo "Chain not deleted" + fi + + # Service chain creation/deletion through 
PRS update + gbp group-create fw_lb-consumer --consumed-policy-rule-sets "fw_lb-webredirect-ruleset=None" + gbp group-create fw_lb-provider + ServiceChainInstanceCount=`gbp sci-list -f value | grep fw_lb-provider | wc -l` + if [ "$ServiceChainInstanceCount" -eq "0" ]; then + echo "Chain not created" + else + echo "Chain not deleted" + fi + + gbp group-update fw_lb-provider --provided-policy-rule-sets "fw_lb-webredirect-ruleset=None" --network-service-policy fw_lb_nsp + ServiceChainInstanceCount=`gbp sci-list -f value | grep fw_lb-provider | wc -l` + if [ "$ServiceChainInstanceCount" -eq "1" ]; then + echo "Chain created" + else + echo "Chain not created" + fi +} + +create_gbp_resources +validate_gbp_resources +validate_firewall_resources +validate_loadbalancer_resources + +update_gbp_resources + +delete_gbp_resources diff --git a/gbpservice/tests/contrib/devstack/exercises-nfp/lb.sh b/gbpservice/tests/contrib/devstack/exercises-nfp/lb.sh new file mode 100755 index 0000000000..4b621f22f6 --- /dev/null +++ b/gbpservice/tests/contrib/devstack/exercises-nfp/lb.sh @@ -0,0 +1,158 @@ +#!/usr/bin/env bash + +# **lb.sh** + +# Sanity check that loadbalancer service is created with NFP + +echo "*********************************************************************" +echo "Begin NFP Exercise: $0" +echo "*********************************************************************" + +# Settings +# ======== + +# This script exits on an error so that errors don't compound and you see +# only the first error that occurred. 
+set -o errexit + +# Keep track of the current directory +EXERCISE_DIR=$(cd $(dirname "$0") && pwd) +TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) + +source $TOP_DIR/openrc neutron service + +create_gbp_resources() { + # E-W insertion + gbp servicechain-node-create --service-profile base_mode_lb --template-file $TOP_DIR/nfp-templates/haproxy.template LB-NODE + gbp servicechain-spec-create --nodes "LB-NODE" lb_chainspec + gbp policy-action-create --action-type REDIRECT --action-value lb_chainspec redirect-to-lb + gbp policy-classifier-create --protocol tcp --direction bi lb-webredirect + gbp policy-rule-create --classifier lb-webredirect --actions redirect-to-lb lb-webredirect-rule + gbp policy-rule-set-create --policy-rules "lb-webredirect-rule" lb-webredirect-ruleset + gbp network-service-policy-create --network-service-params type=ip_single,name=vip_ip,value=self_subnet lb_nsp + gbp group-create lb-consumer --consumed-policy-rule-sets "lb-webredirect-ruleset=None" + gbp group-create lb-provider --provided-policy-rule-sets "lb-webredirect-ruleset=None" --network-service-policy lb_nsp +} + +delete_gbp_resources() { + gbp group-delete lb-consumer + gbp group-delete lb-provider + gbp network-service-policy-delete lb_nsp + gbp policy-rule-set-delete lb-webredirect-ruleset + gbp policy-rule-delete lb-webredirect-rule + gbp policy-classifier-delete lb-webredirect + gbp policy-action-delete redirect-to-lb + gbp servicechain-spec-delete lb_chainspec + gbp servicechain-node-delete LB-NODE +} + +validate_gbp_resources() { + ServiceChainInstanceCount=`gbp sci-list -f value | grep lb-provider | wc -l` + if [ "$ServiceChainInstanceCount" -eq "1" ]; then + echo "Chain creation Succeded" + else + echo "Chain creation failed" + fi +} + +validate_loadbalancer_resources() { + LBPoolCount=`neutron lb-pool-list -f value | wc -l` + if [ "$LBPoolCount" -eq "1" ]; then + echo "LB Pool resource created" + LBPoolUUID=`neutron lb-pool-list -f value | awk '{print $1}'` + LBPoolStatus=`neutron 
lb-pool-show $LBPoolUUID -f value -c status` + echo "LB Pool resource is in $LBPoolStatus state" + else + echo "LB Pool resource not created" + fi + + LBVIPCount=`neutron lb-vip-list -f value | wc -l` + if [ "$LBVIPCount" -eq "1" ]; then + echo "LB VIP resource created" + LBVIPUUID=`neutron lb-vip-list -f value | awk '{print $1}'` + LBVIPStatus=`neutron lb-vip-show $LBVIPUUID -f value -c status` + echo "LB VIP resource is in $LBVIPStatus state" + else + echo "LB VIP resource not created" + fi + + LBHMCount=`neutron lb-healthmonitor-list -f value | wc -l` + if [ "$LBHMCount" -eq "1" ]; then + echo "LB Healthmonitor resource created" + else + echo "LB Healthmonitor resource not created" + fi + + gbp policy-target-create --policy-target-group lb-provider provider_pt1 + sleep 5 + LBMemberCount=`neutron lb-member-list -f value | wc -l` + if [ "$LBMemberCount" -eq "1" ]; then + echo "LB Member resource created" + else + echo "LB Member resource not created" + fi + + gbp policy-target-create --policy-target-group lb-provider provider_pt2 + sleep 5 + LBMemberCount=`neutron lb-member-list -f value | wc -l` + if [ "$LBMemberCount" -eq "2" ]; then + echo "LB Member resource created" + else + echo "LB Member resource not created" + fi + + gbp policy-target-delete provider_pt1 + sleep 5 + LBMemberCount=`neutron lb-member-list -f value | wc -l` + if [ "$LBMemberCount" -eq "1" ]; then + echo "LB Member resource deleted" + else + echo "LB Member resource not deleted" + fi + + gbp policy-target-delete provider_pt2 + sleep 5 + LBMemberCount=`neutron lb-member-list -f value | wc -l` + if [ "$LBMemberCount" -eq "0" ]; then + echo "LB Member resource deleted" + else + echo "LB Member resource not deleted" + fi +} + +update_gbp_resources() { + gbp group-delete lb-provider + gbp group-delete lb-consumer + ServiceChainInstanceCount=`gbp sci-list -f value | grep lb-provider | wc -l` + if [ "$ServiceChainInstanceCount" -eq "0" ]; then + echo "Chain deleted" + else + echo "Chain not deleted" 
+ fi + + # Service chain creation/deletion through PRS update + gbp group-create lb-consumer --consumed-policy-rule-sets "lb-webredirect-ruleset=None" + gbp group-create lb-provider + ServiceChainInstanceCount=`gbp sci-list -f value | grep lb-provider | wc -l` + if [ "$ServiceChainInstanceCount" -eq "0" ]; then + echo "Chain not created" + else + echo "Chain not deleted" + fi + + gbp group-update lb-provider --provided-policy-rule-sets "lb-webredirect-ruleset=None" --network-service-policy lb_nsp + ServiceChainInstanceCount=`gbp sci-list -f value | grep lb-provider | wc -l` + if [ "$ServiceChainInstanceCount" -eq "1" ]; then + echo "Chain created" + else + echo "Chain not created" + fi +} + +create_gbp_resources +validate_gbp_resources +validate_loadbalancer_resources + +update_gbp_resources + +delete_gbp_resources diff --git a/gbpservice/tests/contrib/devstack/local-nfp.conf b/gbpservice/tests/contrib/devstack/local-nfp.conf new file mode 100644 index 0000000000..0a1dca2f84 --- /dev/null +++ b/gbpservice/tests/contrib/devstack/local-nfp.conf @@ -0,0 +1,140 @@ +[[local|localrc]] +DATABASE_PASSWORD=abc123 +ADMIN_PASSWORD=abc123 +MYSQL_PASSWORD=$DATABASE_PASSWORD +RABBIT_PASSWORD=abc123 +SERVICE_PASSWORD=$ADMIN_PASSWORD +SERVICE_TOKEN=abc123 + +ENABLE_NFP=True +NFP_DEVSTACK_MODE=base + +Q_SERVICE_PLUGIN_CLASSES=neutron.services.l3_router.l3_router_plugin.L3RouterPlugin,group_policy,ncp + + +# Using group-policy branches +# --------------------------- + +GIT_BASE=${GIT_BASE:-git://git.openstack.org} + +#GBPSERVICE_REPO=${GIT_BASE}/openstack/group-based-policy.git +#GBPSERVICE_BRANCH=master +GBPSERVICE_REPO=https://review.openstack.org/openstack/group-based-policy +GBPSERVICE_BRANCH=refs/changes/05/335405/72 + +GBPCLIENT_REPO=${GIT_BASE}/openstack/python-group-based-policy-client.git +GBPCLIENT_BRANCH=master +#GBPCLIENT_REPO=https://review.openstack.org/openstack/python-group-based-policy-client +#GBPCLIENT_BRANCH=refs/changes/95/311695/3 + 
+GBPUI_REPO=${GIT_BASE}/openstack/group-based-policy-ui.git +GBPUI_BRANCH=master +#GBPUI_REPO=https://review.openstack.org/openstack/group-based-policy-ui +#GBPUI_BRANCH=refs/changes/02/136802/14 + +GBPHEAT_REPO=${GIT_BASE}/openstack/group-based-policy-automation.git +GBPHEAT_BRANCH=master +#GBPHEAT_REPO= +#GBPHEAT_BRANCH= + +# Enable neutron for group-policy-poc +# ----------------------------------- +disable_service n-net +#disable_service h-eng +#disable_service h-api +#disable_service h-api-cfn +#disable_service h-api-cw +enable_service q-svc +enable_service q-agt +enable_service q-dhcp +enable_service q-l3 +enable_service q-fwaas +[[ $ENABLE_NFP = True ]] && [[ $NFP_DEVSTACK_MODE = advanced ]] && enable_service neutron-vpnaas +enable_service q-lbaas +enable_service q-meta +enable_service neutron +enable_service group-policy +disable_service tempest +ENABLED_SERVICES+=,heat,h-api,h-api-cfn,h-api-cw,h-eng + +if [[ $ENABLE_NFP = True ]]; then + # NFP services + enable_service nfp_orchestrator + enable_service nfp_proxy + enable_service nfp_proxy_agent + [[ $NFP_DEVSTACK_MODE = base ]] && enable_service nfp_base_configurator + [[ $NFP_DEVSTACK_MODE = advanced ]] && enable_service nfp_config_orchestrator + [[ $NFP_DEVSTACK_MODE = advanced ]] && enable_plugin neutron-vpnaas https://git.openstack.org/openstack/neutron-vpnaas stable/mitaka + +fi + + +ConfiguratorQcow2Image= +VyosQcow2Image= +HaproxyQcow2Image= + + + + + +LOG_COLOR=False +DEST=/opt/stack/new +SCREEN_LOGDIR=$DEST/logs/screen +LOGFILE=$DEST/logs/stack.sh.log +SKIP_EXERCISES=volumes,trove,swift,sahara,euca,bundle,boot_from_volume,aggregates,zaqar,client-env,client-args,sec_groups,neutron-adv-test,floating_ips,horizon + +#OFFLINE=True +RECLONE=True + +# Group-based Policy configuration +# Comment the lines below if you don't want to configure the datapath +# and use the dummy driver. 
+[[post-config|/etc/nova/nova.conf]] +[neutron] +allow_duplicate_networks = True + +#[[post-config|/etc/heat/heat.conf]] +#[DEFAULT] +#plugin_dirs=/opt/stack/gbpautomation/gbpautomation/heat + +[[post-config|/etc/neutron/neutron.conf]] +[keystone_authtoken] +admin_tenant_name = service +admin_user = neutron +admin_password = abc123 + +[group_policy] +policy_drivers=implicit_policy,resource_mapping,chain_mapping +extension_drivers=proxy_group + +[servicechain] +servicechain_drivers = simplechain_driver +#servicechain_drivers = chain_with_two_arm_appliance_driver + +[node_composition_plugin] +node_plumber = admin_owned_resources_apic_plumber +node_drivers = heat_node_driver,nfp_node_driver + +[admin_owned_resources_apic_tscp] +plumbing_resource_owner_user = neutron +plumbing_resource_owner_password = abc123 +plumbing_resource_owner_tenant_name = service + +[group_policy_implicit_policy] +default_ip_pool = 10.0.0.0/8 +default_proxy_ip_pool = 192.169.0.0/16 +default_external_segment_name = default + +[nfp_node_driver] +is_service_admin_owned = True +svc_management_ptg_name = svc_management_ptg + +[quotas] +default_quota = -1 +quota_network = -1 +quota_subnet = -1 +quota_port = -1 +quota_security_group = -1 +quota_security_group_rule = -1 +quota_router = -1 +quota_floatingip = -1 diff --git a/gbpservice/tests/contrib/devstack/nfp b/gbpservice/tests/contrib/devstack/nfp new file mode 100644 index 0000000000..309818917a --- /dev/null +++ b/gbpservice/tests/contrib/devstack/nfp @@ -0,0 +1,429 @@ +#!/bin/bash +NEW_BASE="$BASE/new" +DISKIMAGE_CREATE_DIR=$NEW_BASE/group-based-policy/gbpservice/tests/contrib/diskimage_create +DEVSTACK_DIR=$NEW_BASE/devstack +NFPSERVICE_DIR=$NEW_BASE/group-based-policy +NEUTRON_CONF_DIR=/etc/neutron +NEUTRON_CONF=$NEUTRON_CONF_DIR/neutron.conf +NFP_CONF_DIR=/etc/nfp + +ENABLE_NFP=${ENABLE_NFP:-False} + +ConfiguratorQcow2Image=${ConfiguratorQcow2Image:-build} +#VyosQcow2Image=${VyosQcow2Image:-build} 
+#HaproxyQcow2Image=${HaproxyQcow2Image:-build} + + +function prepare_nfp_image_builder { + #setup_develop $NFPSERVICE_DIR + sudo -H -E pip install -r $DISKIMAGE_CREATE_DIR/requirements.txt + sudo apt-get install -y --force-yes qemu-utils + sudo apt-get install -y --force-yes dpkg-dev + if [[ $NFP_DEVSTACK_MODE = advanced ]]; then + sudo wget -qO- https://get.docker.com/ | bash + fi + +} + +function assign_user_role_credential { + source $DEVSTACK_DIR/openrc admin admin + #set -x + serviceTenantID=`keystone tenant-list | grep "service" | awk '{print $2}'` + serviceRoleID=`keystone role-list | grep "service" | awk '{print $2}'` + adminRoleID=`keystone role-list | grep "admin" | awk '{print $2}'` + keystone user-role-add --user nova --tenant $serviceTenantID --role $serviceRoleID + keystone user-role-add --user neutron --tenant $serviceTenantID --role $adminRoleID +} + + + +# create_nfp_gbp_resources() - Create various GBP resources +function create_nfp_gbp_resources { + source $DEVSTACK_DIR/openrc neutron service + unset OS_USER_DOMAIN_ID + unset OS_PROJECT_DOMAIN_ID + + if [[ $NFP_DEVSTACK_MODE = base ]]; then + + IMAGE_NAME="reference_configurator_image" + FLAVOR=m1.nfp-tiny + + gbp service-profile-create\ + --servicetype LOADBALANCER\ + --insertion-mode l3\ + --shared True\ + --service-flavor service_vendor=haproxy,device_type=None\ + --vendor NFP\ + base_mode_lb + + gbp service-profile-create\ + --servicetype FIREWALL\ + --insertion-mode l3\ + --shared True\ + --service-flavor service_vendor=nfp,device_type=nova,image_name=$IMAGE_NAME,flavor=$FLAVOR\ + --vendor NFP\ + base_mode_fw_vm + else + gbp service-profile-create\ + --servicetype LOADBALANCER\ + --insertion-mode l3\ + --shared True\ + --service-flavor service_vendor=haproxy,device_type=nova\ + --vendor NFP\ + lb_profile + + gbp service-profile-create\ + --servicetype LOADBALANCERV2\ + --insertion-mode l3\ + --shared True\ + --service-flavor service_vendor=haproxy_lbaasv2,device_type=nova,flavor=m1.small\ + 
--vendor NFP\ + lbv2_profile + + gbp service-profile-create\ + --servicetype FIREWALL\ + --insertion-mode l3\ + --shared True\ + --service-flavor service_vendor=vyos,device_type=nova\ + --vendor NFP\ + vyos_fw_profile + + gbp service-profile-create\ + --servicetype VPN\ + --insertion-mode l3\ + --shared True\ + --service-flavor service_vendor=vyos,device_type=nova\ + --vendor NFP\ + vpn_profile + + gbp nsp-create\ + --network-service-params type=ip_pool,name=vip_ip,value=nat_pool\ + svc_mgmt_fip_policy + + + fi + + gbp l3policy-create\ + --ip-version 4\ + --proxy-ip-pool=192.169.0.0/24\ + --ip-pool 120.0.0.0/24\ + --subnet-prefix-length 24\ + service_management + + gbp l2policy-create\ + --l3-policy service_management\ + svc_management_ptg + + gbp group-create\ + svc_management_ptg\ + --service_management True\ + --l2-policy\ + svc_management_ptg + + neutron router-gateway-clear\ + l3p_service_management + + gbp l3policy-update\ + --external-segment ""\ + service_management + +} + +# create_port_for_vm() - Create a port, and get its details +# Args: +# $1 - image_name +# $2 - instance name +function create_port_for_vm { + GROUP="svc_management_ptg" + PortId=$(gbp policy-target-create --policy-target-group $GROUP $2 | grep port_id | awk '{print $4}') + IpAddr_extractor=`neutron port-list --format value | grep $PortId | awk '{print $7}'` + IpAddr_purge_last=${IpAddr_extractor::-1} + IpAddr=${IpAddr_purge_last//\"/} + echo "IpAddr of port($PortId): $IpAddr" + configurator_image_name=$1 + configurator_port_id=$PortId + configurator_ip=$IpAddr +} + +# create_nfp_image() - Create and upload the service images +function create_nfp_image { + source $DEVSTACK_DIR/openrc neutron service + unset OS_USER_DOMAIN_ID + unset OS_PROJECT_DOMAIN_ID + + if [[ $NFP_DEVSTACK_MODE = base ]]; then + RefConfiguratorQcow2ImageName=reference_configurator_image + echo "Building Image: $RefConfiguratorQcow2ImageName" + sudo python $DISKIMAGE_CREATE_DIR/disk_image_create.py 
$DISKIMAGE_CREATE_DIR/ref_configurator_conf.json + RefConfiguratorQcow2Image=$(cat $DISKIMAGE_CREATE_DIR/output/last_built_image_path) + echo "Uploading Image: $RefConfiguratorQcow2ImageName" + glance image-create --name $RefConfiguratorQcow2ImageName --disk-format qcow2 --container-format bare --visibility public --file $RefConfiguratorQcow2Image + openstack --os-cloud=devstack-admin flavor create --ram 512 --disk 3 --vcpus 1 m1.nfp-tiny + else + ConfiguratorQcow2ImageName=configurator + ConfiguratorInstanceName="configuratorVM_instance" + create_port_for_vm $ConfiguratorQcow2ImageName $ConfiguratorInstanceName + if [[ $ConfiguratorQcow2Image = build ]]; then + echo "Building Image: $ConfiguratorQcow2ImageName" + sudo python $DISKIMAGE_CREATE_DIR/disk_image_create.py $DISKIMAGE_CREATE_DIR/configurator_conf.json $GBPSERVICE_BRANCH + ConfiguratorQcow2Image=$(cat $DISKIMAGE_CREATE_DIR/output/last_built_image_path) + fi + echo "Uploading Image: $ConfiguratorQcow2ImageName" + glance image-create --name $ConfiguratorQcow2ImageName --disk-format qcow2 --container-format bare --visibility public --file $ConfiguratorQcow2Image + + VyosQcow2ImageName=vyos + if ! [[ -z $VyosQcow2Image ]]; then + echo "Uploading Image: $VyosQcow2ImageName" + glance image-create --name $VyosQcow2ImageName --disk-format qcow2 --container-format bare --visibility public --file $VyosQcow2Image + fi + + HaproxyQcow2ImageName=haproxy + if ! [[ -z $HaproxyQcow2Image ]]; then + echo "Uploading Image: $HaproxyQcow2ImageName" + glance image-create --name $HaproxyQcow2ImageName --disk-format qcow2 --container-format bare --visibility public --file $HaproxyQcow2Image + fi + fi +} + +# configure_configurator_user_data() - Configure Configurator user data +function configure_configurator_user_data { + CUR_DIR=$PWD + sudo rm -rf /opt/configurator_user_data + sudo cp -r $NFPSERVICE_DIR/devstack/exercises/nfp_service/user-data/configurator_user_data /opt/. 
+ cd /opt + sudo rm -rf my.key my.key.pub + sudo ssh-keygen -t rsa -N "" -f my.key + value=`sudo cat my.key.pub` + sudo echo $value + sudo sed -i "8 i\ -\ $value" configurator_user_data + sudo sed -i '9d' configurator_user_data + cd $CUR_DIR +} + +# launch_configuratorVM() - Launch the Configurator VM +function launch_configuratorVM { + echo "Collecting ImageId : for $configurator_image_name" + ImageId=`glance image-list | grep $configurator_image_name | awk '{print $2}'` + if [ ! -z "$ImageId" -a "$ImageId" != " " ]; then + echo $ImageId + else + echo "No image found with name $configurator_image_name" + exit + fi + + configure_configurator_user_data + nova boot\ + --flavor m1.medium\ + --user-data /opt/configurator_user_data\ + --image $ImageId\ + --nic port-id=$configurator_port_id\ + $ConfiguratorInstanceName + sleep 10 +} + + +# namespace_delete() - Utility for namespace management +function namespace_delete { + source $DEVSTACK_DIR/openrc neutron service + + #Deletion namespace + NFP_P=`sudo ip netns | grep "nfp-proxy"` + if [ ${#NFP_P} -ne 0 ]; then + sudo ip netns delete nfp-proxy + echo "namespace removed" + fi + + #Delete veth peer + PEER=`ip a | grep pt1` + if [ ${#PEER} -ne 0 ]; then + echo "veth peer removed" + sudo ip link delete pt1 + fi + + #pt1 port removing from ovs + PORT=`sudo ovs-vsctl show | grep "pt1"` + if [ ${#PORT} -ne 0 ]; then + sudo ovs-vsctl del-port br-int pt1 + echo "ovs port ptr1 is removed" + fi + + echo "nfp-proxy cleaning success." +} + +# namespace_create() - Utility for namespace management +function namespace_create { + SERVICE_MGMT_NET="l2p_svc_management_ptg" + echo "Creating new namespace nfp-proxy...." 
+ + #new namespace with name proxy + NFP_P=`sudo ip netns add nfp-proxy` + if [ ${#NFP_P} -eq 0 ]; then + echo "New namepace nfp-proxy created" + else + echo "New namespace nfp-proxy creation failed" + exit 0 + fi + + #Create veth peer + PEER=`sudo ip link add pt0 type veth peer name pt1` + if [ ${#PEER} -eq 0 ]; then + echo "New veth pair created" + else + echo "veth pair creation failed" + exit 0 + fi + sleep 1 + + #move one side of veth into namespace + sudo ip link set pt0 netns nfp-proxy + + #create new neutron port in service mgmt network + new_ip=`neutron port-create $SERVICE_MGMT_NET | grep "fixed_ips" | awk '{print $7}' | sed 's/^\"\(.*\)\"}$/\1/'` + if [ ${#new_ip} -lt 5 ]; then + echo "new_ip =$new_ip" + echo "Neutron port creation failed (check source) " + exit 0 + else + echo "New Neutron Port Created on Service management network with ip =$new_ip" + fi + new_ip_cidr+="$new_ip/24" + sleep 2 + + #get the ip address of new port eg : 11.0.0.6 and assign to namespace + sudo ip netns exec nfp-proxy ip addr add $new_ip_cidr dev pt0 + + #move other side of veth into ovs : br-int + sudo ovs-vsctl add-port br-int pt1 + + #get id of service management network + smn_id=`neutron net-list | grep "$SERVICE_MGMT_NET" | awk '{print $2}'` + + #get the dhcp namespace of service management network + nm_space=`sudo ip netns | grep "$smn_id"` + + #get port id from router namespace + port=`sudo ip netns exec $nm_space ip a | grep "tap" | tail -n 1 | awk '{print $7}'` + + #get tag_id from port in ovs-bridge + tag_id=`sudo ovs-vsctl list port $port | grep "tag" | tail -n 1 | awk '{print $3}'` + + sudo ovs-vsctl set port pt1 tag=$tag_id + + #bring up both ports + sudo ip netns exec nfp-proxy ip link set pt0 up + sudo ip netns exec nfp-proxy ip link set lo up + sudo ip link set pt1 up + + PING=`sudo ip netns exec nfp-proxy ping $configurator_ip -q -c 2 > /dev/null` + if [ ${#PING} -eq 0 ]; then + echo "nfp-proxy namespcace creation success and reaching to $configurator_ip" 
+ else + echo "Fails reaching to $configurator_ip" + fi + sudo ip netns exec nfp-proxy /usr/bin/nfp_proxy --config-file=/etc/nfp_proxy.ini +} + +function copy_nfp_files_and_start_process { + cd $NFPSERVICE_DIR/gbpservice/nfp + sudo cp -r bin/nfp /usr/bin/ + sudo chmod +x /usr/bin/nfp + sudo rm -rf /etc/nfp_* + sudo cp -r bin/nfp_orchestrator.ini /etc/ + sudo cp -r bin/nfp_proxy_agent.ini /etc/ + [[ $NFP_DEVSTACK_MODE = advanced ]] && sudo cp -r bin/nfp_config_orch.ini /etc/ + sudo cp -r bin/nfp_proxy.ini /etc/nfp_proxy.ini + sudo cp -r bin/nfp_proxy /usr/bin/ + + if [[ $NFP_DEVSTACK_MODE = base ]]; then + configurator_ip=127.0.0.1 + configurator_port=8080 + else + configurator_ip=$configurator_ip + configurator_port=8070 + fi + echo "Configuring proxy.ini .... with rest_server_address as $configurator_ip:$configurator_port" + sudo sed -i "s/rest_server_address=*.*/rest_server_address=$configurator_ip/g" /etc/nfp_proxy.ini + sudo sed -i "s/rest_server_port= *.*/rest_server_port=$configurator_port/g" /etc/nfp_proxy.ini + + sed -i 's#source.*#source '$DEVSTACK_DIR'/openrc demo demo#g' $NFPSERVICE_DIR/devstack/exercises/nfp_service/*.sh + source $DEVSTACK_DIR/functions-common + + echo "Starting nfp_orchestrator under screen named nfp_orchestrator" + run_process nfp_orchestrator "sudo /usr/bin/nfp --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini --config-file /etc/nfp_orchestrator.ini --log-file $DEST/logs/nfp_orchestrator.log" + sleep 4 + + echo "Starting nfp_proxy_agent under screen named nfp_proxy_agent" + run_process nfp_proxy_agent "sudo /usr/bin/nfp --config-file /etc/nfp_proxy_agent.ini --log-file $DEST/logs/nfp_proxy_agent.log" + sleep 4 + + echo "Starting nfp_proxy inside namespace named nfp-proxy, under screen named nfp_proxy" + run_process nfp_proxy "source $NFPSERVICE_DIR/devstack/lib/nfp; namespace_delete; namespace_create" + sleep 10 + if [[ $NFP_DEVSTACK_MODE = advanced ]]; then + echo "Starting 
nfp_config_orchestrator under screen named nfp_config_orchestrator" + run_process nfp_config_orchestrator "sudo /usr/bin/nfp --config-file /etc/nfp_config_orch.ini --config-file /etc/neutron/neutron.conf --log-file $DEST/logs/nfp_config_orchestrator.log" + else + cd pecan/api + sudo python setup.py develop + echo "Starting nfp_base_configurator under screen named nfp_base_configurator" + run_process nfp_base_configurator "cd $NFPSERVICE_DIR/gbpservice/nfp/pecan/api; sudo ip netns exec nfp-proxy pecan configurator_decider config.py --mode base" + fi + sleep 1 + + echo "Upgrading DB to HEAD" + source $DEVSTACK_DIR/openrc neutron service + gbp-db-manage --config-file /etc/neutron/neutron.conf upgrade head + sleep 2 + + echo "NFP configuration done." +} + +function configure_nfp_loadbalancer { + echo "Configuring NFP Loadbalancer plugin driver" + sudo\ + sed\ + -i\ + '/^service_provider.*HaproxyOnHostPluginDriver:default/'\ +'s'/\ +':default'/\ +'\n'\ +'service_provider = LOADBALANCER:loadbalancer:gbpservice.nfp.service_plugins.loadbalancer.drivers.nfp_lbaas_plugin_driver.HaproxyOnVMPluginDriver:default'/\ + /etc/neutron/neutron_lbaas.conf +} + +function configure_nfp_firewall { + echo "Configuring NFP Firewall plugin" + sudo\ + sed\ + -i\ + '/^service_plugins/'\ +'s'/\ +'neutron_fwaas.services.firewall.fwaas_plugin.FirewallPlugin'/\ +'gbpservice.nfp.service_plugins.firewall.nfp_fwaas_plugin.NFPFirewallPlugin'/\ + /etc/neutron/neutron.conf +} + +function configure_nfp_vpn { + echo "Configuring NFP VPN plugin driver" + sudo\ + sed\ + -i\ + '/^service_provider.*IPsecVPNDriver:default/'\ +'s'/\ +':default'/\ +'\n'\ +'service_provider = VPN:vpn:gbpservice.nfp.service_plugins.vpn.drivers.nfp_vpnaas_driver.NFPIPsecVPNDriver:default'/\ + /etc/neutron/neutron_vpnaas.conf +} + + +function nfp_setup { + if [[ $ENABLE_NFP = True ]]; then + prepare_nfp_image_builder + assign_user_role_credential + create_nfp_gbp_resources + create_nfp_image + [[ $NFP_DEVSTACK_MODE = advanced ]] && 
launch_configuratorVM + copy_nfp_files_and_start_process + fi +} diff --git a/gbpservice/tests/contrib/devstack/nfp-templates/fw_template.yml b/gbpservice/tests/contrib/devstack/nfp-templates/fw_template.yml new file mode 100644 index 0000000000..24f1b9509e --- /dev/null +++ b/gbpservice/tests/contrib/devstack/nfp-templates/fw_template.yml @@ -0,0 +1,45 @@ +heat_template_version: 2013-05-23 + +description: Template to deploy firewall + +resources: + sc_firewall: + type: OS::Neutron::Firewall + + properties: + description: "{'insert_type': 'east_west', 'vm_management_ip': u'192.168.20.138', 'provider_ptg_info': ['fa:16:3e:28:7d:b2']}" + + firewall_policy_id: { get_resource: sc_firewall_policy } + name: "serviceVM_infra_FW" + + sc_firewall_policy: + type: OS::Neutron::FirewallPolicy + properties: + name: "" + firewall_rules: [{ get_resource: sc_firewall_rule1 } , { get_resource: sc_firewall_rule2 }, { get_resource: sc_firewall_rule3 },{get_resource: sc_firewall_rule4 }] + sc_firewall_rule1: + type: OS::Neutron::FirewallRule + properties: + name: "Rule_1" + action: "allow" + protocol: "udp" + sc_firewall_rule2: + type: OS::Neutron::FirewallRule + properties: + name: "Rule_2" + action: "allow" + protocol: "icmp" + sc_firewall_rule3: + type: OS::Neutron::FirewallRule + properties: + name: "Rule_3" + action: "allow" + protocol: "tcp" + destination_port: "22" + sc_firewall_rule4: + type: OS::Neutron::FirewallRule + properties: + name: "Rule_4" + action: "allow" + protocol: "tcp" + destination_port: "80" diff --git a/gbpservice/tests/contrib/devstack/nfp-templates/fw_updated_template.yml b/gbpservice/tests/contrib/devstack/nfp-templates/fw_updated_template.yml new file mode 100644 index 0000000000..2e5b5a3d8a --- /dev/null +++ b/gbpservice/tests/contrib/devstack/nfp-templates/fw_updated_template.yml @@ -0,0 +1,31 @@ +heat_template_version: 2013-05-23 + +description: Template to deploy firewall + +resources: + sc_firewall: + type: OS::Neutron::Firewall + + properties: + 
description: "{'insert_type': 'east_west', 'vm_management_ip': u'192.168.20.138', 'provider_ptg_info': ['fa:16:3e:28:7d:b2']}" + + firewall_policy_id: { get_resource: sc_firewall_policy } + name: "serviceVM_infra_FW" + + sc_firewall_policy: + type: OS::Neutron::FirewallPolicy + properties: + name: "" + firewall_rules: [{ get_resource: sc_firewall_rule1 } , { get_resource: sc_firewall_rule2 }] + sc_firewall_rule1: + type: OS::Neutron::FirewallRule + properties: + name: "Rule_1" + action: "allow" + protocol: "udp" + sc_firewall_rule2: + type: OS::Neutron::FirewallRule + properties: + name: "Rule_2" + action: "allow" + protocol: "icmp" diff --git a/gbpservice/tests/contrib/devstack/nfp-templates/haproxy.template b/gbpservice/tests/contrib/devstack/nfp-templates/haproxy.template new file mode 100644 index 0000000000..31f3810346 --- /dev/null +++ b/gbpservice/tests/contrib/devstack/nfp-templates/haproxy.template @@ -0,0 +1,65 @@ +{ + "AWSTemplateFormatVersion" : "2010-09-09", + "Description": "Template to test Haproxy Loadbalacer service", + + "Parameters": { + "Subnet": { + "Description": "Pool Subnet CIDR, on which VIP port should be created", + "Type": "String" + }, + "vip_ip": { + "Description": "VIP IP Address", + "Type": "String" + }, + "service_chain_metadata": { + "Description": "sc metadata", + "Type": "String" + } + }, + + "Resources" : { + "HttpHM": { + "Type": "OS::Neutron::HealthMonitor", + "Properties": { + "admin_state_up": true, + "delay": 20, + "expected_codes": "200", + "http_method": "GET", + "max_retries": 3, + "timeout": 10, + "type": "HTTP", + "url_path": "/" + } + }, + "HaproxyPool": { + "Type": "OS::Neutron::Pool", + "Properties": { + "admin_state_up": true, + "description": "Haproxy pool from teplate", + "lb_method": "ROUND_ROBIN", + "monitors": [{"Ref":"HttpHM"}], + "name": "Haproxy pool", + "provider": "haproxy", + "protocol": "HTTP", + "subnet_id": {"Ref":"Subnet"}, + "vip": { + "subnet": {"Ref":"Subnet"}, + "address": {"Ref":"vip_ip"}, + 
"name": "Haproxy vip", + "description": {"Ref":"service_chain_metadata"}, + "protocol_port": 80, + "connection_limit": -1, + "admin_state_up": true + } + } + }, + "HaproxyLb": { + "Type": "OS::Neutron::LoadBalancer", + "Properties": { + "pool_id": {"Ref":"HaproxyPool"}, + "protocol_port": 80 + } + } + } +} + diff --git a/gbpservice/tests/contrib/diskimage-create/build_image.py b/gbpservice/tests/contrib/diskimage-create/build_image.py new file mode 100755 index 0000000000..6f9b178933 --- /dev/null +++ b/gbpservice/tests/contrib/diskimage-create/build_image.py @@ -0,0 +1,241 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import commands +import os +from oslo_serialization import jsonutils +import subprocess +import sys + + +conf = [] +cur_dir = '' + + +def parse_json(j_file): + global conf + + with open(j_file) as json_data: + conf = jsonutils.load(json_data) + return + + +def set_nfp_git_branch(nfp_branch_name, configurator_dir): + Dockerfile_path = configurator_dir + '/Dockerfile' + cmd = "sudo sed -i \"s/GIT-BRANCH-NAME/%s/g\" %s" % ( + nfp_branch_name.replace('/', '\/'), Dockerfile_path) + os.system(cmd) + + +def create_configurator_docker(nfp_branch_name): + configurator_dir = "%s/../../../contrib/nfp/configurator" % cur_dir + docker_images = "%s/output/docker_images/" % cur_dir + if not os.path.exists(docker_images): + os.makedirs(docker_images) + + # create a docker image + os.chdir(configurator_dir) + set_nfp_git_branch(nfp_branch_name, configurator_dir) + docker_args = ['docker', 'build', '-t', 'configurator-docker', '.'] + ret = subprocess.call(docker_args) + if(ret): + print("Failed to build docker image [configurator-docker]") + return -1 + + os.chdir(docker_images) + del(docker_args) + # save the docker image + docker_args = ['docker', 'save', '-o', 'configurator-docker', + 'configurator-docker'] + ret = subprocess.call(docker_args) + if(ret): + print("Failed to save docker image [configurator-docker]") + return -1 + # set environment variable, needed by 'extra-data.d' + os.environ['DOCKER_IMAGES_PATH'] = docker_images + + return 0 + + +def create_apt_source_list(): + """ + Creates a file 00-haproxy-agent-debs, this will be executed by dib to + create a file haproxy-agent-debs.list file inside VM + at /etc/apt/sources.list.d/ + This file will contain entries for apt to fetch any debs from + our local repo + """ + elems = "%s/elements" % cur_dir + + # update repo_host ip in 00-haproxy-agent-debs file + # this file will be copied to VM at /etc/apt/sources.list.d/ + os.chdir("%s/debs/pre-install.d/" % elems) + with open("00-haproxy-agent-debs", "w") as f: + 
f.write("#!/bin/bash\n\n") + f.write("set -eu\n") + f.write("set -o xtrace\n\n") + f.write("apt-get install ubuntu-cloud-keyring\n") + if 'haproxy' in conf['dib']['elements']: + tmp_str = ('echo "deb http://%s/ /haproxy/"' + ' > /etc/apt/sources.list.d/haproxy-agent-debs.list' + % 'localhost') + f.write(tmp_str + '\n') + + +def update_haproxy_repo(): + haproxy_vendor_dir = ("%s/../../../nfp/service_vendor_agents/haproxy" + % cur_dir) + service = 'haproxy-agent' + version = '1' + release = '1' + subprocess.call(['rm', '-rf', + "%s/%s/deb-packages" % (haproxy_vendor_dir, service)]) + os.chdir(haproxy_vendor_dir) + ret = subprocess.call(['bash', + 'build_haproxy_agent_deb.sh', + service, + version, release]) + if(ret): + print("ERROR: Unable to generate haproxy-agent deb package") + return 1 + + subprocess.call(["rm", "-rf", "/var/www/html/haproxy"]) + out = subprocess.call(["mkdir", "-p", "/var/www/html/haproxy/"]) + haproxy_agent_deb = ("%s/%s/deb-packages/%s-%s-%s.deb" + % (haproxy_vendor_dir, service, + service, version, release)) + subprocess.call(["cp", haproxy_agent_deb, "/var/www/html/haproxy/"]) + + os.chdir("/var/www/html") + out = commands.getoutput("dpkg-scanpackages haproxy/ /dev/null" + " | gzip -9c > haproxy/Packages.gz") + print(out) + + return 0 + + +def dib(nfp_branch_name): + dib = conf['dib'] + elems = "%s/elements/" % cur_dir + + # set the elements path in environment variable + os.environ['ELEMENTS_PATH'] = elems + # set the Ubuntu Release for the build in environment variable + os.environ['DIB_RELEASE'] = conf['ubuntu_release']['release'] + + # basic elements + dib_args = ['disk-image-create', 'base', 'vm', 'ubuntu'] + + # configures elements + for element in dib['elements']: + dib_args.append(element) + # root login enabled, set password environment varaible + if element == 'root-passwd': + os.environ['DIB_PASSWORD'] = dib['root_password'] + elif element == 'devuser': + os.environ['DIB_DEV_USER_USERNAME'] = 'ubuntu' + 
os.environ['DIB_DEV_USER_SHELL'] = '/bin/bash' + elif element == 'nfp-reference-configurator': + image_name = 'nfp_reference_service' + service_dir = "%s/../nfp_service/" % cur_dir + pecan_dir = os.path.abspath(os.path.join(cur_dir, + '../../../nfp')) + service_dir = os.path.realpath(service_dir) + pecan_dir = os.path.realpath(pecan_dir) + os.environ['PECAN_GIT_PATH'] = pecan_dir + os.environ['SERVICE_GIT_PATH'] = service_dir + if 'devuser' in dib['elements']: + os.environ['SSH_RSS_KEY'] = ( + "%s/output/%s" % (cur_dir, image_name)) + os.environ['DIB_DEV_USER_AUTHORIZED_KEYS'] = ( + "%s.pub" % os.environ['SSH_RSS_KEY']) + elif element == 'configurator': + image_name = 'configurator' + create_configurator_docker(nfp_branch_name) + # for bigger size images + dib_args.append('--no-tmpfs') + elif element == 'haproxy': + image_name = 'haproxy' + dib_args.append('debs') + create_apt_source_list() + + # offline mode, assuming the image cache (tar) already exists + dib_args.append('--offline') + cache_path = dib['cache_path'].replace('~', os.environ.get('HOME', '-1')) + dib_args.append('--image-cache') + dib_args.append(cache_path) + + dib_args.append('--image-size') + dib_args.append(str(dib['image_size_in_GB'])) + dib_args.append('-o') + dib_args.append(str(image_name)) + + os.chdir(cur_dir) + out_dir = 'output' + if not os.path.isdir(out_dir): + os.makedirs(out_dir) + os.chdir(out_dir) + print("DIB-ARGS: %r" % dib_args) + + ret = subprocess.call(dib_args) + if not ret: + image_path = "%s/output/%s.qcow2" % (cur_dir, image_name) + print("Image location: %s" % image_path) + + +if __name__ == "__main__": + + if len(sys.argv) < 2: + print("ERROR: Invalid Usage") + print("Usage:\n\t%s [NFP_BRANCH_NAME]" + % sys.argv[0]) + print("\twhere: contains all the configuration") + print("\tand NFP_BRANCH_NAME is the string, and is optional.") + exit() + + # save PWD + cur_dir = os.path.dirname(__file__) + cur_dir = os.path.realpath(cur_dir) + if not cur_dir: + # if script is executed 
from current dir, get abs path + cur_dir = os.path.realpath('./') + + # parse args from json file + parse_json(sys.argv[1]) + elements = conf['dib']['elements'] + if 'haproxy' in elements: + # Enable default site in apache2 for local repo + cmd = ("sudo cp" + " /etc/apache2/sites-available/000-default.conf" + " /etc/apache2/sites-enabled/") + os.system(cmd) + cmd = ("sudo service apache2 restart") + os.system(cmd) + if(update_haproxy_repo()): + exit() + + nfp_branch_name = sys.argv[2] if len(sys.argv) == 3 else None + + if 'configurator' in elements and nfp_branch_name is None: + print("ERROR: You have to pass NFP_BRANCH_NAME.") + exit() + + # run Disk Image Builder to create VM image + dib(nfp_branch_name) + + if 'haproxy' in elements: + # Disable the default site in apache2 + cmd = ("sudo rm" + " /etc/apache2/sites-enabled/000-default.conf") + os.system(cmd) + cmd = ("sudo service apache2 restart") + os.system(cmd) diff --git a/gbpservice/tests/contrib/diskimage-create/disk_image_create.py b/gbpservice/tests/contrib/diskimage-create/disk_image_create.py index 9590e357d3..d3fe131a7b 100755 --- a/gbpservice/tests/contrib/diskimage-create/disk_image_create.py +++ b/gbpservice/tests/contrib/diskimage-create/disk_image_create.py @@ -10,7 +10,6 @@ # License for the specific language governing permissions and limitations # under the License. 
-import commands import os from oslo_serialization import jsonutils import subprocess @@ -37,7 +36,7 @@ def set_nfp_git_branch(nfp_branch_name, configurator_dir): def create_configurator_docker(nfp_branch_name): - configurator_dir = "%s/../../../nfp/configurator" % cur_dir + configurator_dir = "%s/../../../contrib/nfp/configurator" % cur_dir docker_images = "%s/output/docker_images/" % cur_dir if not os.path.exists(docker_images): os.makedirs(docker_images) @@ -66,63 +65,6 @@ def create_configurator_docker(nfp_branch_name): return 0 -def create_apt_source_list(): - """ - Creates a file 00-haproxy-agent-debs, this will be executed by dib to - create a file haproxy-agent-debs.list file inside VM - at /etc/apt/sources.list.d/ - This file will contain entries for apt to fetch any debs from - our local repo - """ - elems = "%s/elements" % cur_dir - - # update repo_host ip in 00-haproxy-agent-debs file - # this file will be copied to VM at /etc/apt/sources.list.d/ - os.chdir("%s/debs/pre-install.d/" % elems) - with open("00-haproxy-agent-debs", "w") as f: - f.write("#!/bin/bash\n\n") - f.write("set -eu\n") - f.write("set -o xtrace\n\n") - f.write("apt-get install ubuntu-cloud-keyring\n") - if 'haproxy' in conf['dib']['elements']: - tmp_str = ('echo "deb http://%s/ /haproxy/"' - ' > /etc/apt/sources.list.d/haproxy-agent-debs.list' - % 'localhost') - f.write(tmp_str + '\n') - - -def update_haproxy_repo(): - haproxy_vendor_dir = ("%s/../../../nfp/service_vendor_agents/haproxy" - % cur_dir) - service = 'haproxy-agent' - version = '1' - release = '1' - subprocess.call(['rm', '-rf', - "%s/%s/deb-packages" % (haproxy_vendor_dir, service)]) - os.chdir(haproxy_vendor_dir) - ret = subprocess.call(['bash', - 'build_haproxy_agent_deb.sh', - service, - version, release]) - if(ret): - print("ERROR: Unable to generate haproxy-agent deb package") - return 1 - - subprocess.call(["rm", "-rf", "/var/www/html/haproxy"]) - out = subprocess.call(["mkdir", "-p", "/var/www/html/haproxy/"]) - 
haproxy_agent_deb = ("%s/%s/deb-packages/%s-%s-%s.deb" - % (haproxy_vendor_dir, service, - service, version, release)) - subprocess.call(["cp", haproxy_agent_deb, "/var/www/html/haproxy/"]) - - os.chdir("/var/www/html") - out = commands.getoutput("dpkg-scanpackages haproxy/ /dev/null" - " | gzip -9c > haproxy/Packages.gz") - print(out) - - return 0 - - def dib(nfp_branch_name): dib = conf['dib'] elems = "%s/elements/" % cur_dir @@ -163,10 +105,6 @@ def dib(nfp_branch_name): create_configurator_docker(nfp_branch_name) # for bigger size images dib_args.append('--no-tmpfs') - elif element == 'haproxy': - image_name = 'haproxy' - dib_args.append('debs') - create_apt_source_list() # offline mode, assuming the image cache (tar) already exists dib_args.append('--offline') @@ -190,9 +128,8 @@ def dib(nfp_branch_name): if not ret: image_path = "%s/output/%s.qcow2" % (cur_dir, image_name) print("Image location: %s" % image_path) - with open("/tmp/image_path", "w") as f: + with open("%s/output/last_built_image_path" % cur_dir, "w") as f: f.write(image_path) - f.close() if __name__ == "__main__": @@ -215,10 +152,6 @@ def dib(nfp_branch_name): # parse args from json file parse_json(sys.argv[1]) elements = conf['dib']['elements'] - elem = 'haproxy' - if elem in elements: - if(update_haproxy_repo()): - exit() nfp_branch_name = sys.argv[2] if len(sys.argv) == 3 else None diff --git a/gbpservice/tests/contrib/diskimage-create/visibility_disk_image_create.py b/gbpservice/tests/contrib/diskimage-create/visibility_disk_image_create.py index 011ad07b45..31d508af71 100755 --- a/gbpservice/tests/contrib/diskimage-create/visibility_disk_image_create.py +++ b/gbpservice/tests/contrib/diskimage-create/visibility_disk_image_create.py @@ -76,7 +76,7 @@ def set_nfp_git_branch(nfp_branch_name, configurator_dir): def create_configurator_docker(nfp_branch_name): - configurator_dir = "%s/../../../nfp/configurator" % cur_dir + configurator_dir = "%s/../../../contrib/nfp/configurator" % cur_dir 
docker_images = "%s/output/docker_images/" % cur_dir if not os.path.exists(docker_images): os.makedirs(docker_images) @@ -162,7 +162,7 @@ def update_haproxy_repo(): return 0 -def dib(nfp_branch_name, local_conf_file_path): +def dib(nfp_branch_name, docker_images_url): dib = conf['dib'] elems = "%s/elements/" % cur_dir @@ -203,10 +203,7 @@ def dib(nfp_branch_name, local_conf_file_path): create_visibility_docker() # create_configurator_docker(nfp_branch_name) # set environment variable, needed by 'extra-data.d' - p1 = subprocess.Popen(['grep', 'DOCKER_IMAGES_URL', local_conf_file_path], stdout=subprocess.PIPE) - p2 = subprocess.Popen(['cut', '-d', '=', '-f', '2'], stdin=p1.stdout, stdout=subprocess.PIPE) - p3 = subprocess.Popen(['tr', '-d', '[[:space:]]'], stdin=p2.stdout, stdout=subprocess.PIPE) - os.environ['DOCKER_IMAGES_URL'] = p3.communicate()[0] + os.environ['DOCKER_IMAGES_URL'] = docker_images_url # for bigger size images dib_args.append('--no-tmpfs') elif element == 'haproxy': @@ -238,9 +235,8 @@ def dib(nfp_branch_name, local_conf_file_path): if not ret: image_path = "%s/output/%s.qcow2" % (cur_dir, image_name) print("Image location: %s" % image_path) - with open("/tmp/image_path", "w") as f: + with open("%s/output/last_built_image_path" % cur_dir, "w") as f: f.write(image_path) - f.close() if __name__ == "__main__": @@ -250,7 +246,7 @@ def dib(nfp_branch_name, local_conf_file_path): print("Usage:\n\t%s NFP_BRANCH_NAME local.conf file" % sys.argv[0]) print("\twhere: contains all the configuration") print("\tNFP_BRANCH_NAME is the string") - print("\tand is the configuration file from the devstack directory.") + print("\tand DOCKER_IMAGES_URL is the URL string.") exit() # save PWD diff --git a/gbpservice/tests/contrib/diskimage-create/vyos/customize_vyos.sh b/gbpservice/tests/contrib/diskimage-create/vyos/customize_vyos.sh new file mode 100644 index 0000000000..7bb2902ee0 --- /dev/null +++ b/gbpservice/tests/contrib/diskimage-create/vyos/customize_vyos.sh @@ 
-0,0 +1,73 @@ +#!/bin/vbash +source /opt/vyatta/etc/functions/script-template + +# set rules +set firewall all-ping 'enable' +set firewall broadcast-ping 'disable' +set firewall config-trap 'disable' +set firewall ipv6-receive-redirects 'disable' +set firewall ipv6-src-route 'disable' +set firewall ip-src-route 'disable' +set firewall log-martians 'enable' +set firewall receive-redirects 'disable' +set firewall send-redirects 'enable' +set firewall source-validation 'disable' +set firewall syn-cookies 'enable' +set firewall twa-hazards-protection 'disable' +set 'policy' +set protocols 'static' + +# delete non-working repository +delete system package repository community +# Add squeeze repository for downloading dependent packages +set system package repository squeeze components 'main contrib non-free' +set system package repository squeeze distribution 'squeeze' +set system package repository squeeze url 'http://archive.debian.org/debian' +set system package repository squeeze-lts components 'main contrib non-free' +set system package repository squeeze-lts distribution 'squeeze-lts' +set system package repository squeeze-lts url 'http://archive.debian.org/debian' +# set local repo +set system package repository vyos components '#' +set system package repository vyos distribution 'amd64/' +set system package repository vyos url 'http://192.168.122.1/vyos/' + +# change password +set system login user vyos authentication plaintext-password $VYOS_PASSWORD +commit +save + +# update the repo +sudo apt-get -o Acquire::Check-Valid-Until=false update +# install dependent packages +sudo apt-get -y install python-netifaces python-flask python-netaddr + +# get vyos package +sudo apt-get -y --force-yes install vyos + +set system task-scheduler task health-monitor executable path '/usr/share/vyos/config_server/interface_monitor.sh' +set system task-scheduler task health-monitor interval '5m' + +# delete the local repo +delete system package repository vyos + +# commit and 
save the above changes +commit +save +exit + +# edit /etc/network/interfaces file as required by vyos agent +# make 'static' to all interfaces except eth0 +sudo sed -i 's/inet dhcp/inet static/g' /etc/network/interfaces +sudo sed -i 's/eth0 inet static/eth0 inet dhcp/g' /etc/network/interfaces + +# copy the missing pl files +sudo cp /opt/vyatta/sbin/vyatta-firewall-trap.pl / +sudo cp /opt/vyatta/sbin/valid_port_range.pl / +sudo cp /opt/vyatta/sbin/vyatta-firewall.pl / +sudo cp /opt/vyatta/sbin/vyatta-fw-global-state-policy.pl / +sudo cp /opt/vyatta/sbin/vyatta-ipset.pl / + +# free up disk space +sudo rm -rf /var/lib/apt/lists +sudo apt-get clean +sudo apt-get autoclean diff --git a/gbpservice/tests/contrib/diskimage-create/vyos/packer.json b/gbpservice/tests/contrib/diskimage-create/vyos/packer.json new file mode 100644 index 0000000000..34220127b0 --- /dev/null +++ b/gbpservice/tests/contrib/diskimage-create/vyos/packer.json @@ -0,0 +1,82 @@ +{ + "builders": [ + { + "accelerator": "kvm", + "boot_command": [ + "", + "vyos", + "vyos", + "install image", + "", + "", + "", + "Yes", + "", + "", + "", + "vyos", + "vyos", + "", + "reboot", + "Yes", + "vyos", + "vyos", + "configure", + "set interface ethernet eth0 address dhcp", + "set service ssh", + "commit", + "save", + "delete interface ethernet eth0 hw-id", + "commit", + "save", + "exit" + ], + "boot_wait": "5s", + "disk_compression": true, + "disk_interface": "virtio", + "disk_size": 4096, + "format": "qcow2", + "headless": true, + "iso_checksum": "{{user `iso_checksum`}}", + "iso_checksum_type": "md5", + "iso_url": "{{user `iso_url`}}", + "name": "qemu-image", + "net_device": "virtio-net", + "output_directory": "./output", + "qemuargs": [ + [ + "-m", + "1024" + ] + ], + "shutdown_command": "sudo halt -p", + "ssh_host_port_max": 2229, + "ssh_host_port_min": 2222, + "ssh_password": "vyos", + "ssh_port": 22, + "ssh_username": "vyos", + "ssh_wait_timeout": "300s", + "type": "qemu", + "vm_name": "vyos.qcow2" + } + ], + 
"provisioners": [ + { + "environment_vars": [ + "VYOS_PASSWORD={{user `vyos_passwd`}}" + ], + "only": [ + "qemu-image" + ], + "scripts": [ + "./customize_vyos.sh" + ], + "type": "shell" + } + ], + "variables": { + "iso_checksum": "{{env `ISO_MD5_SUM`}}", + "iso_url": "{{env `ISO_IMAGE`}}", + "vyos_passwd": "{{env `VYOS_PASSWORD`}}" + } +} diff --git a/gbpservice/tests/contrib/diskimage-create/vyos/vyos_conf.json b/gbpservice/tests/contrib/diskimage-create/vyos/vyos_conf.json new file mode 100644 index 0000000000..71b62c60d2 --- /dev/null +++ b/gbpservice/tests/contrib/diskimage-create/vyos/vyos_conf.json @@ -0,0 +1,7 @@ +{ + "packer": + { + "image_size":4, + "vyos_pswd":"oc@sc!23;)" + } +} diff --git a/gbpservice/tests/contrib/diskimage-create/vyos/vyos_image_create.py b/gbpservice/tests/contrib/diskimage-create/vyos/vyos_image_create.py new file mode 100644 index 0000000000..45b4c0668d --- /dev/null +++ b/gbpservice/tests/contrib/diskimage-create/vyos/vyos_image_create.py @@ -0,0 +1,200 @@ +import sys +import os +import json +import subprocess +import commands +import datetime +import requests + + +conf = [] +cur_dir = '' + +def parse_json(j_file): + global conf + + with open(j_file) as json_data: + config = json.load(json_data) + return config + + +def update_vyos_repo(): + + vyos_vendor_dir = ("%s/../../../../nfp/service_vendor_agents/vyos/" % cur_dir) + service = 'agent' + version = '2' + release = '1' + subprocess.call(['rm', '-rf', + "%s/%s/deb-packages" % (vyos_vendor_dir, service)]) + os.chdir(vyos_vendor_dir) + ret = subprocess.call(['bash', + 'build_vyos_deb.sh', + service, + version, release]) + + if(ret): + print "ERROR: Unable to generate vyos agent deb package" + return 1 + + subprocess.call(["rm", "-rf", "/var/www/html/vyos"]) + subprocess.call(["mkdir", "-p", "/var/www/html/vyos/amd64"]) + vyos_agent_deb = ("%s/%s/deb-packages/vyos-%s-%s.deb" + % (vyos_vendor_dir, service, + version, release)) + subprocess.call(["cp", vyos_agent_deb, 
"/var/www/html/vyos/amd64/"]) + + # update repo Packages.gz + os.chdir("/var/www/html/vyos") + out = commands.getoutput("dpkg-scanpackages amd64 | gzip -9c > amd64/Packages.gz") + print out + + return 0 + +def packer_build(): + + os.chdir(cur_dir) + os.environ['VYOS_PASSWORD'] = conf['packer']['vyos_pswd'] + + # get the packer configuration + try: + conf_packer = parse_json("./packer.json") + except Exception as e: + print "ERROR: parsing ./packer.json file" + print e + return + + # packer expects VM size in MB + conf_packer['builders'][0]['disk_size'] = conf['packer']['image_size'] * 1024 + # packer exptects new output dir name for each run, packer creates the dir + # update VM output file name + filepath = os.environ.get('ISO_IMAGE', '-1') + iso = os.path.basename(filepath) + + # update the packer.json file + with open('packer.json', 'w') as f: + json.dump(conf_packer, f, sort_keys = True, indent = 4, ensure_ascii=False) + + print "\n#########################################################" + print "Invoking packer build, this will take about 10mins......" 
+ print "#########################################################\n" + # invoke packer build + ret = subprocess.call(["packer", "build", "packer.json"]) + if ret: + print "ERROR: packer build failed" + + image_path = "%s/output/%s.qcow2" % (cur_dir, "vyos") + print("Image location: %s" % image_path) + + return + + +def check_packer_tool(): + if(os.path.isfile("/usr/local/bin/packer")): + return 0 + + # get packer tool from website + print "Downloading 'packer' tool" + ret = subprocess.call(["wget", "https://releases.hashicorp.com/packer/0.10.1/packer_0.10.1_linux_amd64.zip"]) + if ret: + print "ERROR: Unable to download packer tool" + return 1 + # unzip the file and copy packer tool to specific place + ret = subprocess.call(["unzip", "packer_0.10.1_linux_amd64.zip"]) + if ret: + return 1 + ret = subprocess.call(["cp", "packer", "/usr/local/bin/"]) + if ret: + return 1 + return 0 + +def get_vyos_iso(): + iso_path = os.environ['HOME'] + "/.cache/image-create/" + iso_file = "vyos-1.1.7-amd64.iso" + os.environ['ISO_IMAGE'] = iso_path + iso_file + os.environ['ISO_MD5_SUM'] = commands.getoutput("md5sum %s" % (iso_path + iso_file)).split(' ')[0] + if(os.path.isfile(iso_path + iso_file)): + print "VyOS iso: %s exists locally" % (iso_path + iso_file) + return 0 + + # get the output dir + if not os.path.isdir(iso_path): + os.makedirs(iso_path) + + # download iso from internet + os.chdir(iso_path) + print "Downloading VyOS 1.1.7 ISO" + iso_url = "http://packages.vyos.net/iso/release/1.1.7/vyos-1.1.7-amd64.iso" + ret = subprocess.call(["wget", iso_url]) + if ret: + return 1 + + # get sha1sum for iso from web + sha1sum_web = '' + r = requests.get("http://packages.vyos.net/iso/release/1.1.7/sha1sums") + sha1sums = r.content.splitlines() + for sums in sha1sums: + if(sums.find(iso_file)) > 0: + sha1sum_web = sums.split(' ')[0] + + # calculate the sha1 of downloaded file + sha1sum_local = commands.getoutput("sha1sum %s" % (iso_path + iso_file)).split(' ')[0] + + if not sha1sum_web 
== sha1sum_local: + print "Downloaded iso file is corrupt, exiting now..." + return 1 + os.environ['ISO_MD5_SUM'] = commands.getoutput("md5sum %s" % (iso_path + iso_file)).split(' ')[0] + + return 0 + + + +if __name__ == "__main__": + + if os.geteuid(): + sys.exit("ERROR: Script should be run as sudo/root") + if len(sys.argv) != 2: + print "ERROR: Invalid Usage" + print "Usage:\n\t%s " % sys.argv[0] + print "\twhere: contains all the configuration" + exit() + # save PWD + cur_dir = os.path.dirname(__file__) + cur_dir = os.path.realpath(cur_dir) + if not cur_dir: + # if script is executed from current dir, get abs path + cur_dir = os.path.realpath('./') + + # parse args from json file + try: + conf = parse_json(sys.argv[1]) + except Exception as e: + print "ERROR parsing json file" + print e + exit() + + if(check_packer_tool()): + print "ERROR: Failed to get packer tool" + exit() + + if(get_vyos_iso()): + print "ERROR: Unable to get vyos-1.1.7-amd64.iso file" + exit() + + # Enable default site in apache2 for local repo + cmd = ("sudo cp" + " /etc/apache2/sites-available/000-default.conf" + " /etc/apache2/sites-enabled/") + os.system(cmd) + cmd = ("sudo service apache2 restart") + os.system(cmd) + if(update_vyos_repo()): + exit() + + packer_build() + + # Disable the default site in apache2 + cmd = ("sudo rm" + " /etc/apache2/sites-enabled/000-default.conf") + os.system(cmd) + cmd = ("sudo service apache2 restart") + os.system(cmd) diff --git a/gbpservice/tests/contrib/functions-gbp b/gbpservice/tests/contrib/functions-gbp index 21f8090293..49ca1cc36b 100644 --- a/gbpservice/tests/contrib/functions-gbp +++ b/gbpservice/tests/contrib/functions-gbp @@ -24,6 +24,22 @@ function prepare_gbp_devstack { source $TOP_DIR/functions-common } +function prepare_nfp_devstack { + cd $TOP_DIR + sudo cp $CONTRIB_DIR/devstack/local-nfp.conf $TOP_DIR/local.conf + sudo rm -rf $TOP_DIR/exercises/*.sh + sudo cp $CONTRIB_DIR/devstack/exercises-nfp/*.sh $TOP_DIR/exercises/ + sudo cp -r 
$CONTRIB_DIR/devstack/nfp-templates $TOP_DIR + sudo cp $CONTRIB_DIR/devstack/nfp $TOP_DIR/lib/ + sed -i 's/source $TOP_DIR\/lib\/dstat/source $TOP_DIR\/lib\/dstat\nsource $TOP_DIR\/lib\/nfp/g' stack.sh + sed -i "s/install_gbpservice/install_gbpservice\n[[ $NFP_DEVSTACK_MODE = advanced ]] && configure_nfp_loadbalancer\n[[ $NFP_DEVSTACK_MODE = advanced ]] && configure_nfp_vpn\n/g" stack.sh + sed -i "s/start_neutron_service_and_check/[[ $NFP_DEVSTACK_MODE = advanced ]] && configure_nfp_firewall\n start_neutron_service_and_check/g" stack.sh + sed -i 's/# Restore\/close logging file descriptors/nfp_setup $TOP_DIR\n# Restore\/close logging file descriptors/g' stack.sh + source $TOP_DIR/functions + source $TOP_DIR/functions-common + +} + function source_creds { local xtrace=$(set +o | grep xtrace) set +o xtrace diff --git a/gbpservice/tests/contrib/gate_nfp_hook.sh b/gbpservice/tests/contrib/gate_nfp_hook.sh new file mode 100644 index 0000000000..35beb2e5b6 --- /dev/null +++ b/gbpservice/tests/contrib/gate_nfp_hook.sh @@ -0,0 +1,34 @@ +#!/bin/bash + +CONTRIB_DIR="$BASE/new/group-based-policy/gbpservice/tests/contrib" +cp $CONTRIB_DIR/functions-gbp . 
+source functions-gbp + +set -x + +trap prepare_logs ERR + +prepare_gbp_devstack +prepare_nfp_devstack +$TOP_DIR/stack.sh + +# Use devstack functions to install mysql and psql servers +source $TOP_DIR/stackrc +source $TOP_DIR/lib/database +disable_service postgresql +enable_service mysql +initialize_database_backends +install_database + +# Set up the 'openstack_citest' user and database in each backend +tmp_dir=`mktemp -d` + +cat << EOF > $tmp_dir/mysql.sql +CREATE DATABASE openstack_citest; +CREATE USER 'openstack_citest'@'localhost' IDENTIFIED BY 'openstack_citest'; +CREATE USER 'openstack_citest' IDENTIFIED BY 'openstack_citest'; +GRANT ALL PRIVILEGES ON *.* TO 'openstack_citest'@'localhost'; +GRANT ALL PRIVILEGES ON *.* TO 'openstack_citest'; +FLUSH PRIVILEGES; +EOF +/usr/bin/mysql -u root < $tmp_dir/mysql.sql diff --git a/gbpservice/tests/contrib/nfp_service/reference_configurator/api/__init__.py b/gbpservice/tests/contrib/nfp_service/reference_configurator/api/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/gbpservice/tests/contrib/nfp_service/reference_configurator/api/config.py b/gbpservice/tests/contrib/nfp_service/reference_configurator/api/config.py deleted file mode 100644 index 442308d9d0..0000000000 --- a/gbpservice/tests/contrib/nfp_service/reference_configurator/api/config.py +++ /dev/null @@ -1,68 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -# Server Specific Configurations -server = { - 'port': '8080', - 'host': '0.0.0.0' -} - -# Pecan Application Configurations -app = { - 'root': 'root_controller.RootController', - 'modules': ['v1'], - 'debug': True, - 'errors': { - 404: '/error/404', - '__force_dict__': True - } -} - -logging = { - 'root': {'level': 'INFO', 'handlers': ['console']}, - 'loggers': { - 'pecanlog': {'level': 'INFO', - 'handlers': ['console'], - 'propagate': False}, - 'pecan': {'level': 'INFO', - 'handlers': ['console'], - 'propagate': False}, - 'py.warnings': {'handlers': ['console']}, - '__force_dict__': True - }, - 'handlers': { - 'console': { - 'level': 'INFO', - 'class': 'logging.StreamHandler', - 'formatter': 'color' - } - }, - 'formatters': { - 'simple': { - 'format': ('%(asctime)s %(levelname)-5.5s [%(name)s]' - '[%(threadName)s] %(message)s') - }, - 'color': { - '()': 'pecan.log.ColorFormatter', - 'format': ('%(asctime)s [%(padded_color_levelname)s] [%(name)s]' - '[%(threadName)s] %(message)s'), - '__force_dict__': True - } - } -} - -# Custom Configurations must be in Python dictionary format:: -# -# foo = {'bar':'baz'} -# -# All configurations are accessible at:: -# pecan.conf diff --git a/gbpservice/tests/contrib/nfp_service/reference_configurator/api/v1/__init__.py b/gbpservice/tests/contrib/nfp_service/reference_configurator/api/v1/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/gbpservice/tests/contrib/nfp_service/reference_configurator/bin/nfp-pecan b/gbpservice/tests/contrib/nfp_service/reference_configurator/bin/nfp-pecan index cecabd8edc..88498579f6 100644 --- a/gbpservice/tests/contrib/nfp_service/reference_configurator/bin/nfp-pecan +++ b/gbpservice/tests/contrib/nfp_service/reference_configurator/bin/nfp-pecan @@ -1,4 +1,3 @@ #!/bin/sh -script='/home/ubuntu/reference_configurator/api/config.py' -pecan serve $script & - +script='/usr/local/lib/python2.7/dist-packages/gbpservice/nfp/pecan/api/config.py' +pecan configurator_decider 
$script --mode base_with_vm & diff --git a/gbpservice/tests/contrib/nfp_service/reference_configurator/config/pecan.service b/gbpservice/tests/contrib/nfp_service/reference_configurator/config/pecan.service index 5a334c3fcb..0e2a7cc8eb 100644 --- a/gbpservice/tests/contrib/nfp_service/reference_configurator/config/pecan.service +++ b/gbpservice/tests/contrib/nfp_service/reference_configurator/config/pecan.service @@ -5,7 +5,7 @@ After=network.target auditd.service [Service] Type=forking Restart=always -ExecStart=/home/ubuntu/reference_configurator/bin/nfp-pecan +ExecStart=/usr/local/lib/python2.7/dist-packages/gbpservice/tests/contrib/nfp_service/reference_configurator/bin/nfp-pecan KillMode=process Restart=on-failure diff --git a/gbpservice/tests/contrib/nfp_service/reference_configurator/api/v1/controllers/__init__.py b/gbpservice/tests/contrib/nfp_service/reference_configurator/controllers/__init__.py similarity index 100% rename from gbpservice/tests/contrib/nfp_service/reference_configurator/api/v1/controllers/__init__.py rename to gbpservice/tests/contrib/nfp_service/reference_configurator/controllers/__init__.py diff --git a/gbpservice/tests/contrib/nfp_service/reference_configurator/api/v1/controllers/controller.py b/gbpservice/tests/contrib/nfp_service/reference_configurator/controllers/controller.py similarity index 98% rename from gbpservice/tests/contrib/nfp_service/reference_configurator/api/v1/controllers/controller.py rename to gbpservice/tests/contrib/nfp_service/reference_configurator/controllers/controller.py index 882f696b0a..199c8edbb6 100644 --- a/gbpservice/tests/contrib/nfp_service/reference_configurator/api/v1/controllers/controller.py +++ b/gbpservice/tests/contrib/nfp_service/reference_configurator/controllers/controller.py @@ -27,8 +27,9 @@ SUCCESS = 'SUCCESS' notifications = [] -FW_SCRIPT_PATH = ("/home/ubuntu/reference_configurator/" + - "scripts/configure_fw_rules.py") +FW_SCRIPT_PATH = ("/usr/local/lib/python2.7/dist-packages/" + + 
"gbpservice/tests/contrib/nfp_service/" + + "reference_configurator/scripts/configure_fw_rules.py") class Controller(rest.RestController): diff --git a/gbpservice/tests/contrib/post_test_nfp_hook.sh b/gbpservice/tests/contrib/post_test_nfp_hook.sh new file mode 100644 index 0000000000..392ed39c62 --- /dev/null +++ b/gbpservice/tests/contrib/post_test_nfp_hook.sh @@ -0,0 +1,25 @@ +#!/bin/bash + +source functions-gbp + +set -x + +trap prepare_logs ERR + +# Run exercise scripts +$TOP_DIR/exercise.sh +exercises_exit_code=$? + +source $TOP_DIR/lib/nfp +delete_nfp_gbp_resources $TOP_DIR + +# Check if exercises left any resources undeleted +check_residual_resources neutron service +check_residual_resources admin admin +check_residual_resources admin demo +check_residual_resources demo demo + +# Prepare the log files for Jenkins to upload +prepare_logs + +exit $(($exercises_exit_code)) diff --git a/test-requirements.txt b/test-requirements.txt index 0abfc34831..db41102e9a 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -18,6 +18,8 @@ cairocffi>=0.1 cliff>=1.15.0 # Apache-2.0 coverage>=3.6 # Apache-2.0 fixtures>=1.3.1 # Apache-2.0/BSD +ipaddr==2.1.10 +iptools==0.6.1 httplib2>=0.7.5 mock>=1.2 # BSD python-subunit>=0.0.18 # Apache-2.0/BSD