From 017bdca7804ac34ff52c2d882e6bf7977b04131d Mon Sep 17 00:00:00 2001
From: Kiran Chunduri <nagchund@cisco.com>
Date: Wed, 25 Jun 2014 09:36:35 -0700
Subject: [PATCH] New manifest file to deploy Cisco N1KV-VEM
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Background on 'N1KV-VEM': It is the host-side component of N1KV
implementing the notion of Virtual Ports for VM Interfaces and also
handling the VM data-traffic. VEM is implemented as a couple of
user-space processes and also has a KLM for efficient data-path
switching.
The VEM manifest does the following: sets up the config file for the
VEM service, starts the VEM service and finally adds physical ports
(along with their port-profiles) to the VEM.
Change-Id: Ibc8f1c4d2d743589e452ae6d1d87b5bfa123bc3a
---
.fixtures.yml | 1 +
Modulefile | 1 +
manifests/agents/n1kv_vem.pp | 235 +++++++++++++++++++++++++++
manifests/params.pp | 6 +
spec/classes/neutron_agents_n1kv_vem_spec.rb | 181 +++++++++++++++++++++
templates/n1kv.conf.erb | 152 +++++++++++++++++
6 files changed, 576 insertions(+)
create mode 100644 manifests/agents/n1kv_vem.pp
create mode 100644 spec/classes/neutron_agents_n1kv_vem_spec.rb
create mode 100644 templates/n1kv.conf.erb
diff --git a/.fixtures.yml b/.fixtures.yml
index 883a251..be7fa9c 100644
--- a/.fixtures.yml
+++ b/.fixtures.yml
@@ -8,5 +8,6 @@ fixtures:
ref: 'origin/0.x'
"nova": "git://github.com/stackforge/puppet-nova.git"
"vswitch": "git://github.com/stackforge/puppet-vswitch"
+ 'sysctl': 'git://github.com/duritong/puppet-sysctl.git'
symlinks:
"neutron": "#{source_dir}"
diff --git a/Modulefile b/Modulefile
index 0a80358..d885b5d 100644
--- a/Modulefile
+++ b/Modulefile
@@ -14,3 +14,4 @@ dependency 'puppetlabs/mysql', '>=0.9.0 <3.0.0'
dependency 'puppetlabs/nova', '>=4.0.0 <5.0.0'
dependency 'puppetlabs/stdlib', '>=3.2.0'
dependency 'puppetlabs/vswitch', '>=0.2.0 <1.0.0'
+dependency 'duritong/sysctl', '>=0.0.1 <1.0.0'
diff --git a/manifests/agents/n1kv_vem.pp b/manifests/agents/n1kv_vem.pp
new file mode 100644
index 0000000..439355f
--- /dev/null
+++ b/manifests/agents/n1kv_vem.pp
@@ -0,0 +1,235 @@
+# == Class: n1kv_vem
+#
+# Deploy N1KV VEM on compute and network nodes.
+# Support exists and is tested for RedHat.
+# (For Ubuntu/Debian platforms few changes and testing pending.)
+#
+# === Parameters
+# [*n1kv_vsm_ip*]
+# (required) N1KV VSM(Virtual Supervisor Module) VM's IP.
+# Defaults to 127.0.0.1
+#
+# [*n1kv_vsm_domain_id*]
+# (required) N1KV VSM DomainID.
+# Defaults to 1000
+#
+# [*host_mgmt_intf*]
+# (required) Management Interface of node where VEM will be installed.
+# Defaults to eth1
+#
+# [*uplink_profile*]
+# (optional) Uplink Interfaces that will be managed by VEM. The uplink
+# port-profile that configures these interfaces should also be specified.
+# (format)
+# $uplink_profile = { 'eth1' => 'profile1',
+# 'eth2' => 'profile2'
+# },
+# Defaults to empty
+#
+# [*vtep_config*]
+# (optional) Virtual tunnel interface configuration.
+# Eg:VxLAN tunnel end-points.
+# (format)
+# $vtep_config = { 'vtep1' => { 'profile' => 'virtprof1',
+# 'ipmode' => 'dhcp'
+# },
+# 'vtep2' => { 'profile' => 'virtprof2',
+# 'ipmode' => 'static',
+# 'ipaddress' => '192.168.1.1',
+# 'netmask' => '255.255.255.0'
+# }
+# },
+# Defaults to empty
+#
+# [*node_type*]
+# (optional). Specify the type of node: 'compute' (or) 'network'.
+# Defaults to 'compute'
+#
+# All the above parameter values will be used in the config file: n1kv.conf
+#
+# [*vteps_in_same_subnet*]
+# (optional)
+# The VXLAN tunnel interfaces created on VEM can belong to same IP-subnet.
+# In such case, set this parameter to true. This results in below
+# 'sysctl:ipv4' values to be modified.
+#   rp_filter (reverse path filtering) set to 2 (Loose). Default is 1 (Strict)
+# arp_ignore (arp reply mode) set to 1:reply only if target ip matches
+# that of incoming interface. Default is 0
+#   arp_announce (arp announce mode) set to 2. Default is 0
+# Please refer Linux Documentation for detailed description
+# http://lxr.free-electrons.com/source/Documentation/networking/ip-sysctl.txt
+#
+# If the tunnel interfaces are not in same subnet set this parameter to false.
+# Note that setting to false causes no change in the sysctl settings and does
+# not revert the changes made if it was originally set to true on a previous
+# catalog run.
+#
+# Defaults to false
+#
+# [*n1kv_source*]
+# (optional)
+# n1kv_source ==> VEM package location. One of below
+# A)URL of yum repository that hosts VEM package.
+# B)VEM RPM/DPKG file name, If present locally in 'files' folder
+# C)If not specified, assumes that VEM image is available in
+# default enabled repositories.
+# Defaults to empty
+#
+# [*n1kv_version*]
+# (optional). Specify VEM package version to be installed.
+# Not applicable if 'n1kv_source' is a file. (Option-B above)
+# Defaults to 'present'
+#
+# [*package_ensure*]
+# (optional) Ensure state for dependent packages: Openvswitch/libnl.
+# Defaults to 'present'.
+#
+# [*enable*]
+# (optional) Enable state for service. Defaults to 'true'.
+#
+# [*manage_service*]
+# (optional) Whether to start/stop the service
+# Defaults to true
+#
+class neutron::agents::n1kv_vem (
+ $n1kv_vsm_ip = '127.0.0.1',
+ $n1kv_vsm_domain_id = 1000,
+ $host_mgmt_intf = 'eth1',
+ $uplink_profile = {},
+ $vtep_config = {},
+ $node_type = 'compute',
+ $vteps_in_same_subnet = false,
+ $n1kv_source = '',
+ $n1kv_version = 'present',
+ $package_ensure = 'present',
+ $enable = true,
+ $manage_service = true
+) {
+
+ include neutron::params
+
+ Exec { path => [ '/bin/', '/sbin/' , '/usr/bin/', '/usr/sbin/' ] }
+
+ if($::osfamily != 'Redhat') {
+ #current support exists for Redhat family.
+ #Support for Debian will be added soon.
+ fail("Unsupported osfamily ${::osfamily}")
+ }
+
+ #Check source of n1kv-vem image:yum-repo (or) local file in 'files' directory
+ if $n1kv_source != '' {
+ if ($n1kv_source =~ /^http/) or ($n1kv_source =~ /^ftp/) {
+ $vemimage_uri = 'repo'
+ } else {
+ $vemimage_uri = 'file'
+ $vemtgtimg = "/var/n1kv/${n1kv_source}"
+ }
+ } else {
+ $vemimage_uri = 'unspec'
+ }
+
+
+ package { 'libnl':
+ ensure => $package_ensure,
+ name => $::neutron::params::libnl_package
+ }
+
+ package { 'openvswitch':
+ ensure => $package_ensure,
+ name => $::neutron::params::ovs_package
+ }
+
+ file {
+ '/etc/n1kv':
+ ensure => directory,
+ owner => 'root',
+ group => 'root',
+ mode => '0755';
+ '/var/n1kv':
+ ensure => directory,
+ owner => 'root',
+ group => 'root',
+ mode => '0755',
+ }
+
+ #specify template corresponding to 'n1kv.conf'
+ file {'/etc/n1kv/n1kv.conf':
+ ensure => present,
+ owner => 'root',
+ group => 'root',
+ mode => '0664',
+ content => template('neutron/n1kv.conf.erb'),
+ require => File['/etc/n1kv'],
+ }
+
+ if $vemimage_uri == 'file' {
+ #specify location on target-host where image file will be downloaded to.
+ #Later vem package: 'nexus1000v' will be installed from this file.
+ file { $vemtgtimg:
+ owner => 'root',
+ group => 'root',
+ mode => '0664',
+ source => "puppet:///modules/neutron/${n1kv_source}",
+ require => File['/var/n1kv'],
+ }
+ package {'nexus1000v':
+ ensure => $n1kv_version,
+ provider => $::neutron::params::package_provider,
+ source => $vemtgtimg,
+ require => File[$vemtgtimg]
+ }
+ } else {
+ if $vemimage_uri == 'repo' {
+ #vem package: 'nexus1000v' will be downloaded and installed
+ #from below repo.
+ yumrepo { 'cisco-vem-repo':
+ baseurl => $n1kv_source,
+ descr => 'Repo for VEM Image',
+ enabled => 1,
+ gpgcheck => 1,
+ gpgkey => "${n1kv_source}/RPM-GPG-KEY"
+ #proxy => '_none_',
+ }
+ }
+ package {'nexus1000v':
+ ensure => $package_ensure
+ }
+ }
+
+ if $manage_service {
+ if $enable {
+ $service_ensure = 'running'
+ } else {
+ $service_ensure = 'stopped'
+ }
+ }
+
+ service { 'nexus1000v':
+ ensure => $service_ensure,
+ }
+
+ #Upon config change in 'n1kv.conf' execute below 'vemcmd reread config'.
+ #No need to restart service.
+ exec { 'vemcmd reread config':
+ subscribe => File['/etc/n1kv/n1kv.conf'],
+ refreshonly => true,
+ require => Service['nexus1000v']
+ }
+
+ if $vteps_in_same_subnet == true {
+ $my_sysctl_settings = {
+ 'net.ipv4.conf.default.rp_filter' => { value => 2 },
+ 'net.ipv4.conf.all.rp_filter' => { value => 2 },
+ 'net.ipv4.conf.default.arp_ignore' => { value => 1 },
+ 'net.ipv4.conf.all.arp_ignore' => { value => 1 },
+ 'net.ipv4.conf.all.arp_announce' => { value => 2 },
+ 'net.ipv4.conf.default.arp_announce' => { value => 2 },
+ }
+ create_resources(sysctl::value,$my_sysctl_settings)
+ }
+
+ Package['libnl'] -> Package['nexus1000v']
+ Package['openvswitch'] -> Package['nexus1000v']
+ File['/etc/n1kv/n1kv.conf'] -> Package['nexus1000v']
+ Package['nexus1000v'] ~> Service['nexus1000v']
+}
diff --git a/manifests/params.pp b/manifests/params.pp
index afda431..c143bf8 100644
--- a/manifests/params.pp
+++ b/manifests/params.pp
@@ -13,6 +13,9 @@ class neutron::params {
$ovs_agent_service = 'neutron-openvswitch-agent'
$ovs_server_package = 'openstack-neutron-openvswitch'
$ovs_cleanup_service = 'neutron-ovs-cleanup'
+ $ovs_package = 'openvswitch'
+ $libnl_package = 'libnl'
+ $package_provider = 'rpm'
$linuxbridge_agent_package = false
$linuxbridge_agent_service = 'neutron-linuxbridge-agent'
@@ -68,6 +71,9 @@ class neutron::params {
$ovs_agent_service = 'neutron-plugin-openvswitch-agent'
$ovs_server_package = 'neutron-plugin-openvswitch'
$ovs_cleanup_service = false
+ $ovs_package = 'openvswitch-switch'
+ $libnl_package = 'libnl1'
+ $package_provider = 'dpkg'
$linuxbridge_agent_package = 'neutron-plugin-linuxbridge-agent'
$linuxbridge_agent_service = 'neutron-plugin-linuxbridge-agent'
diff --git a/spec/classes/neutron_agents_n1kv_vem_spec.rb b/spec/classes/neutron_agents_n1kv_vem_spec.rb
new file mode 100644
index 0000000..9b7a4b1
--- /dev/null
+++ b/spec/classes/neutron_agents_n1kv_vem_spec.rb
@@ -0,0 +1,181 @@
+require 'spec_helper'
+
+describe 'neutron::agents::n1kv_vem' do
+
+ let :facts do
+ { :osfamily => 'RedHat' }
+ end
+
+ it 'should have a n1kv-vem config file' do
+ should contain_file('/etc/n1kv/n1kv.conf').with(
+ :ensure => 'present',
+ :owner => 'root',
+ :group => 'root',
+ :mode => '0664'
+ )
+ end
+
+ it 'install n1kv-vem' do
+ should contain_package('libnl').with_before('Package[nexus1000v]')
+ should contain_package('openvswitch').with_before('Package[nexus1000v]')
+ should contain_package('nexus1000v').with_notify('Service[nexus1000v]')
+ should contain_service('nexus1000v').with_ensure('running')
+ end
+
+ context 'with local file vem rpm' do
+ let :params do
+ {
+ :n1kv_source => 'vem.rpm'
+ }
+ end
+
+ it 'verify dependency' do
+ should contain_package('nexus1000v').with_source('/var/n1kv/vem.rpm')
+ should contain_file('/var/n1kv/vem.rpm').that_requires('File[/var/n1kv]')
+ should contain_file('/var/n1kv/vem.rpm').with(
+ :owner => 'root',
+ :group => 'root',
+ :mode => '0664'
+ )
+ end
+ end
+
+ context 'remote vem rpm' do
+ let :params do
+ {
+ :n1kv_source => 'http://www.cisco.com/repo'
+ }
+ end
+
+ it 'verify dependency' do
+ should contain_package('nexus1000v').without_source
+ should contain_yumrepo('cisco-vem-repo').with(
+ :baseurl => 'http://www.cisco.com/repo',
+ :enabled => 1
+ )
+ end
+ end
+
+ it 'execute reread config upon config change' do
+ should contain_exec('vemcmd reread config') \
+ .that_subscribes_to('File[/etc/n1kv/n1kv.conf]')
+ end
+
+ context 'verify n1kv.conf default' do
+ let :params do
+ {
+ :n1kv_vsm_ip => '9.0.0.1',
+ :n1kv_vsm_domain_id => 900,
+ :host_mgmt_intf => 'eth9'
+ }
+ end
+ it do
+ should contain_file('/etc/n1kv/n1kv.conf') \
+ .with_content(/^l3control-ipaddr 9.0.0.1/)
+ should contain_file('/etc/n1kv/n1kv.conf') \
+ .with_content(/^switch-domain 900/)
+ should contain_file('/etc/n1kv/n1kv.conf') \
+ .with_content(/^host-mgmt-intf eth9/)
+ should contain_file('/etc/n1kv/n1kv.conf') \
+ .without_content(/^phys/)
+ should contain_file('/etc/n1kv/n1kv.conf') \
+ .without_content(/^virt/)
+ should contain_file('/etc/n1kv/n1kv.conf') \
+ .with_content(/^node-type compute/)
+ end
+ end
+
+ context 'verify node_type' do
+ let :params do
+ {
+ :node_type => 'network',
+ }
+ end
+ it do
+ should contain_file('/etc/n1kv/n1kv.conf') \
+ .with_content(/^node-type network/)
+ should contain_file('/etc/n1kv/n1kv.conf') \
+ .without_content(/^node-type compute/)
+ end
+ end
+
+ context 'verify n1kv.conf with uplinks' do
+ let :params do
+ {
+ :uplink_profile => { 'eth1' => 'prof1',
+ 'eth2' => 'prof2'
+ }
+ }
+ end
+ it do
+ should contain_file('/etc/n1kv/n1kv.conf') \
+ .with_content(/^phys eth1 profile prof1/)
+ should contain_file('/etc/n1kv/n1kv.conf') \
+ .with_content(/^phys eth2 profile prof2/)
+ end
+
+ end
+
+ context 'verify n1kv.conf with vtep info' do
+ let :params do
+ {
+ :vtep_config => { 'vtep1' => { 'profile' => 'profint',
+ 'ipmode' => 'dhcp'
+ },
+ 'vtep2' => { 'profile' => 'profint',
+ 'ipmode' => 'static',
+ 'ipaddress' => '192.168.1.1',
+ 'netmask' => '255.255.255.0'
+ }
+ }
+ }
+ end
+ it do
+ should contain_file('/etc/n1kv/n1kv.conf') \
+ .with_content(/^virt vtep1 profile profint mode dhcp/)
+ should contain_file('/etc/n1kv/n1kv.conf') \
+ .with_content(/^virt vtep2 profile profint mode static/)
+ end
+
+ end
+
+ context 'with manage_service as false' do
+ let :params do
+ {
+ :manage_service => false
+ }
+ end
+ it 'should not start/stop service' do
+ should contain_service('nexus1000v').without_ensure
+ end
+ end
+
+ context 'with manage_service true and enable_service false' do
+ let :params do
+ {
+ :manage_service => true,
+ :enable => false
+ }
+ end
+ it 'should stop service' do
+ should contain_service('nexus1000v').with_ensure('stopped')
+ end
+ end
+
+ context 'verify sysctl setting with vteps_in_same_subnet true' do
+ let :params do
+ {
+ :vteps_in_same_subnet => true
+ }
+ end
+ it do
+ should contain_sysctl__value('net.ipv4.conf.default.rp_filter').with_value('2')
+ should contain_sysctl__value('net.ipv4.conf.all.rp_filter').with_value('2')
+ should contain_sysctl__value('net.ipv4.conf.default.arp_ignore').with_value('1')
+ should contain_sysctl__value('net.ipv4.conf.all.arp_ignore').with_value('1')
+ should contain_sysctl__value('net.ipv4.conf.all.arp_announce').with_value('2')
+ should contain_sysctl__value('net.ipv4.conf.default.arp_announce').with_value('2')
+ end
+ end
+
+end
diff --git a/templates/n1kv.conf.erb b/templates/n1kv.conf.erb
new file mode 100644
index 0000000..8edb3e2
--- /dev/null
+++ b/templates/n1kv.conf.erb
@@ -0,0 +1,152 @@
+# This is the N1KV VEM configuration file.
+# <n1kv.conf> file contains all the configuration parameters for VEM operation.
+# Please find below a brief explanation of these parameters and their meaning.
+# Optional Parameters and Default Values of parameters are explicitly stated.
+# Note:
+# a)Mandatory parameters are needed for proper VEM operation.
+# N1KV DP/DPA should start even if these are not specified.
+# But there will be functional impact. For eg: in VSM connectivity
+# b)For any updates to parameters to take effect, you just need to execute
+#   'vemcmd reread config'. It's the least-disruptive way for changes to
+#   take effect. However, for certain params, the n1kv service needs to be restarted.
+#   These parameters are stated explicitly (restart_on_modify: YES).
+#
+
+#
+#<vsm-connection-params>
+#
+# TAG: switch-domain
+# Description: N1KV VSM Domain ID
+# Optional: No
+# Default: 1000
+# restart_on_modify: No
+switch-domain <%= @n1kv_vsm_domain_id %>
+
+# TAG: l3control-ipaddr
+# Description: IP Address of VSM Mgmt I/F
+# Optional: No
+# Default: 127.0.0.1
+# restart_on_modify: No
+l3control-ipaddr <%= @n1kv_vsm_ip %>
+
+# TAG: host-mgmt-intf
+# Description: Management interface of the Host
+# Optional: No (on N1KV, we need this
+# for Host Identification on VSM).
+# Default: lo
+# restart_on_modify: Yes
+host-mgmt-intf <%= @host_mgmt_intf %>
+
+#<Port-Profile Mapping>
+# Description: Port-Profile mapping for all VEM managed Interfaces.
+# Optional: Yes
+# restart_on_modify: No
+#
+# Note: Do not specify Host Management Interface here.
+# We do not yet support System Ports (ports which need to be up at all times: post reboot/VEM upgrade).
+#
+# Format for physical ports:
+# phys <port-name> profile <profile-name>
+#phys eth1 profile sys-uplink
+#phys eth2 profile uplink2
+
+<% @uplink_profile.each do |port, profile| -%>
+phys <%= port%> profile <%= profile%>
+<% end -%>
+
+# Format for non-vm virt ports. For instance: VTEP ports.
+# virt <port-name> profile <profile-name> [mode static|dhcp] [address <ipaddr>]
+# [netmask <netmask ip>] [mac <macaddr>]
+# [] -->indicates optional parameters.
+#Eg:
+#virt vtep3 profile profint mode dhcp
+#virt vtep1 profile profint mode dhcp mac 00:11:22:33:44:55
+#virt vtep2 profile profint mode static address 192.168.2.91 netmask 255.255.255.0
+#virt vtep2 profile profint mode static address 192.168.2.91 netmask 255.255.255.0 mac 00:22:22:33:44:55
+
+<% @vtep_config.each do |port, params| -%>
+<% if params['ipmode'] == 'dhcp' -%>
+virt <%= port%> profile <%= params['profile']%> mode dhcp
+<% else-%>
+virt <%= port%> profile <%= params['profile']%> mode static address <%= params['ipaddress']%> netmask <%= params['netmask']%>
+<% end -%>
+<% end -%>
+
+# TAG: uvem-ovs-brname
+# Description: Default Open VSwitch Bridge Name
+# Optional: YES.
+# Default: n1kvdvs
+# restart_on_modify: Yes
+# Format:
+# uvem-ovs-brname n1kvdvs
+uvem-ovs-brname br-int
+
+# TAG: node-type
+# Description: Type of Node: 'compute' (or) 'network'
+# Optional: YES.
+# Default: compute
+# restart_on_modify: No
+# Format:
+# node-type compute
+node-type <%= @node_type %>
+
+# The below parameters are not commonly modified.
+#
+#
+#<system-port-profile-Info>
+# Description: System Port Profiles.
+# Optional: Yes (If there are no System Interfaces: Mgmt I/F etc)
+# restart_on_modify: No
+#
+#Trunk Profile Format
+#profile <name> trunk <vlan>
+#profile <name> native-vlan <vlan>
+#profile <name> mtu <mtu-size>
+#
+#Access Profile
+#profile <name> access <vlan>
+#profile <name> mtu <mtu-size>
+
+# TAG: dp-np-threads
+# Description: Number of datapath threads to process normal priority packets
+# Optional: YES
+# Default: 4
+# restart_on_modify: Yes
+# Format: dp-np-threads <1..32>
+
+# TAG: dp-lp-threads
+# Description: Number of datapath threads to process low priority packets
+# Optional: YES
+# Default: 1
+# restart_on_modify: Yes
+# Format: dp-lp-threads <1..32>
+
+# TAG: dp-hp-threads
+# Description: Number of datapath threads to process high priority packets
+# Optional: YES
+# Default: 1
+# restart_on_modify: Yes
+# Format: dp-hp-threads <1..32>
+
+# TAG: dp-thread-sockets
+# Description: Number of packet sockets each datapath thread creates
+# Optional: YES
+# Default: 1
+# restart_on_modify: Yes
+# Format: dp-thread-sockets <1..16>
+
+# TAG: dp-thread-socket-rbuflen
+# Description: Receive buffer length of each packet socket
+# Optional: YES
+# Default: 8 MBytes
+# restart_on_modify: Yes
+# Format: dp-thread-socket-rbuflen <0..255>
+# Note: 0 - use system default
+
+# TAG: dp-thread-socket-rrnglen
+# Description: Rx-ring length of each packet socket
+# Optional: YES
+# Default: 4096
+# restart_on_modify: Yes
+# Format: dp-thread-socket-rrnglen <0..16384>
+# Note: 0 - disables memory map I/O
--
1.9.3