ansible-2.5.1/bin/ansible:

#!/usr/bin/env python

# (c) 2012, Michael DeHaan
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

########################################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

__requires__ = ['ansible']
try:
    import pkg_resources
except Exception:
    # Use pkg_resources to find the correct versions of libraries and set
    # sys.path appropriately when there are multiversion installs.  But we
    # have code that better expresses the errors in the places where the code
    # is actually used (the deps are optional for many code paths) so we don't
    # want to fail here.
    pass

import os
import shutil
import sys
import traceback

from ansible.errors import AnsibleError, AnsibleOptionsError, AnsibleParserError
from ansible.module_utils._text import to_text


# Used for determining if the system is running a new enough python version
# and should only restrict on our documented minimum versions
_PY3_MIN = sys.version_info[:2] >= (3, 5)
_PY2_MIN = (2, 6) <= sys.version_info[:2] < (3,)
_PY_MIN = _PY3_MIN or _PY2_MIN
if not _PY_MIN:
    raise SystemExit('ERROR: Ansible requires a minimum of Python2 version 2.6 or Python3 version 3.5. Current version: %s' % ''.join(sys.version.splitlines()))


class LastResort(object):
    # OUTPUT OF LAST RESORT
    def display(self, msg, log_only=None):
        print(msg, file=sys.stderr)

    def error(self, msg, wrap_text=None):
        print(msg, file=sys.stderr)


if __name__ == '__main__':

    display = LastResort()

    try:  # bad ANSIBLE_CONFIG or config options can force ugly stacktrace
        import ansible.constants as C
        from ansible.utils.display import Display
    except AnsibleOptionsError as e:
        display.error(to_text(e), wrap_text=False)
        sys.exit(5)

    cli = None
    me = os.path.basename(sys.argv[0])

    try:
        display = Display()
        display.debug("starting run")

        sub = None
        target = me.split('-')
        if target[-1][0].isdigit():
            # Remove any version or python version info as downstreams
            # sometimes add that
            target = target[:-1]

        if len(target) > 1:
            sub = target[1]
            myclass = "%sCLI" % sub.capitalize()
        elif target[0] == 'ansible':
            sub = 'adhoc'
            myclass = 'AdHocCLI'
        else:
            raise AnsibleError("Unknown Ansible alias: %s" % me)

        try:
            mycli = getattr(__import__("ansible.cli.%s" % sub, fromlist=[myclass]), myclass)
        except ImportError as e:
            # ImportError members have changed in py3
            if 'msg' in dir(e):
                msg = e.msg
            else:
                msg = e.message
            if msg.endswith(' %s' % sub):
                raise AnsibleError("Ansible sub-program not implemented: %s" % me)
            else:
                raise

        try:
            args = [to_text(a, errors='surrogate_or_strict') for a in sys.argv]
        except UnicodeError:
            display.error('Command line args are not in utf-8, unable to continue.  Ansible currently only understands utf-8')
            display.display(u"The full traceback was:\n\n%s" % to_text(traceback.format_exc()))
            exit_code = 6
        else:
            cli = mycli(args)
            cli.parse()
            exit_code = cli.run()

    except AnsibleOptionsError as e:
        cli.parser.print_help()
        display.error(to_text(e), wrap_text=False)
        exit_code = 5
    except AnsibleParserError as e:
        display.error(to_text(e), wrap_text=False)
        exit_code = 4
    # TQM takes care of these, but leaving comment to reserve the exit codes
    # except AnsibleHostUnreachable as e:
    #     display.error(str(e))
    #     exit_code = 3
    # except AnsibleHostFailed as e:
    #     display.error(str(e))
    #     exit_code = 2
    except AnsibleError as e:
        display.error(to_text(e), wrap_text=False)
        exit_code = 1
    except KeyboardInterrupt:
        display.error("User interrupted execution")
        exit_code = 99
    except Exception as e:
        if C.DEFAULT_DEBUG:
            # Show raw stacktraces in debug mode; this also allows pdb to
            # enter post-mortem mode.
            raise
        have_cli_options = cli is not None and cli.options is not None
        display.error("Unexpected Exception, this is probably a bug: %s" % to_text(e), wrap_text=False)
        if not have_cli_options or have_cli_options and cli.options.verbosity > 2:
            log_only = False
            if hasattr(e, 'orig_exc'):
                display.vvv('\nexception type: %s' % to_text(type(e.orig_exc)))
                why = to_text(e.orig_exc)
                if to_text(e) != why:
                    display.vvv('\noriginal msg: %s' % why)
        else:
            display.display("to see the full traceback, use -vvv")
            log_only = True
        display.display(u"the full traceback was:\n\n%s" % to_text(traceback.format_exc()), log_only=log_only)
        exit_code = 250
    finally:
        # Remove ansible tmpdir
        shutil.rmtree(C.DEFAULT_LOCAL_TMP, True)

    sys.exit(exit_code)

ansible-2.5.1/bin/ansible-config is a symlink to ansible.

ansible-2.5.1/bin/ansible-connection:

#!/usr/bin/env python
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

__requires__ = ['ansible']
try:
    import pkg_resources
except Exception:
    pass

import fcntl
import os
import signal
import socket
import sys
import traceback
import errno
import json

from ansible import constants as C
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.module_utils.six import PY3
from ansible.module_utils.six.moves import cPickle, StringIO
from ansible.module_utils.connection import Connection, ConnectionError, send_data, recv_data
from ansible.module_utils.service import fork_process
from ansible.playbook.play_context import PlayContext
from ansible.plugins.loader import connection_loader
from ansible.utils.path import unfrackpath, makedirs_safe
from ansible.utils.display import Display
from ansible.utils.jsonrpc import JsonRpcServer


class ConnectionProcess(object):
    '''
    The connection process wraps around a Connection object that manages
    the connection to a remote device that persists over the playbook
    '''
    def __init__(self, fd, play_context, socket_path, original_path, ansible_playbook_pid=None):
        self.play_context = play_context
        self.socket_path = socket_path
        self.original_path = original_path

        self.fd = fd
        self.exception = None

        self.srv = JsonRpcServer()
        self.sock = None

        self.connection = None
        self._ansible_playbook_pid = ansible_playbook_pid

    def start(self):
        try:
            messages = list()
            result = {}

            messages.append('control socket path is %s' % self.socket_path)
            # If this is a relative path (~ gets expanded later) then plug the
            # key's path on to the directory we originally came from, so we can
            # find it now that our cwd is /
            if self.play_context.private_key_file and self.play_context.private_key_file[0] not in '~/':
                self.play_context.private_key_file = os.path.join(self.original_path, self.play_context.private_key_file)
            self.connection = connection_loader.get(self.play_context.connection, self.play_context, '/dev/null',
                                                    ansible_playbook_pid=self._ansible_playbook_pid)
            self.connection.set_options()
            self.connection._connect()
            self.connection._socket_path = self.socket_path
            self.srv.register(self.connection)
            messages.extend(sys.stdout.getvalue().splitlines())
            messages.append('connection to remote device started successfully')

            self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            self.sock.bind(self.socket_path)
            self.sock.listen(1)
            messages.append('local domain socket listeners started successfully')
        except Exception as exc:
            result['error'] = to_text(exc)
            result['exception'] = traceback.format_exc()
        finally:
            result['messages'] = messages
            self.fd.write(json.dumps(result))
            self.fd.close()

    def run(self):
        try:
            while self.connection.connected:
                signal.signal(signal.SIGALRM, self.connect_timeout)
                signal.signal(signal.SIGTERM, self.handler)
                signal.alarm(C.PERSISTENT_CONNECT_TIMEOUT)

                self.exception = None
                (s, addr) = self.sock.accept()
                signal.alarm(0)

                signal.signal(signal.SIGALRM, self.command_timeout)
                while True:
                    data = recv_data(s)
                    if not data:
                        break
                    signal.alarm(self.connection._play_context.timeout)

                    resp = self.srv.handle_request(data)
                    signal.alarm(0)

                    send_data(s, to_bytes(resp))

                s.close()
        except Exception as e:
            # socket.accept() will raise EINTR if the socket.close() is called
            if hasattr(e, 'errno'):
                if e.errno != errno.EINTR:
                    self.exception = traceback.format_exc()
            else:
                self.exception = traceback.format_exc()
        finally:
            # when done, close the connection properly and cleanup
            # the socket file so it can be recreated
            self.shutdown()

    def connect_timeout(self, signum, frame):
        display.display('persistent connection idle timeout triggered, timeout value is %s secs' % C.PERSISTENT_CONNECT_TIMEOUT, log_only=True)
        self.shutdown()

    def command_timeout(self, signum, frame):
        display.display('command timeout triggered, timeout value is %s secs' % self.play_context.timeout, log_only=True)
        self.shutdown()

    def handler(self, signum, frame):
        display.display('signal handler called with signal %s' % signum, log_only=True)
        self.shutdown()

    def shutdown(self):
        """ Shuts down the local domain socket
        """
        if os.path.exists(self.socket_path):
            try:
                if self.sock:
                    self.sock.close()
                if self.connection:
                    self.connection.close()
            except Exception:
                pass
            finally:
                if os.path.exists(self.socket_path):
                    os.remove(self.socket_path)
                    setattr(self.connection, '_socket_path', None)
                    setattr(self.connection, '_connected', False)

        display.display('shutdown complete', log_only=True)


def main():
    """ Called to initiate the connect to the remote device
    """
    rc = 0
    result = {}
    messages = list()
    socket_path = None

    # Need stdin as a byte stream
    if PY3:
        stdin = sys.stdin.buffer
    else:
        stdin = sys.stdin

    # Note: update the below log capture code after Display.display() is refactored.
    saved_stdout = sys.stdout
    sys.stdout = StringIO()

    try:
        # read the play context data via stdin, which means depickling it
        cur_line = stdin.readline()
        init_data = b''

        while cur_line.strip() != b'#END_INIT#':
            if cur_line == b'':
                raise Exception("EOF found before init data was complete")
            init_data += cur_line
            cur_line = stdin.readline()

        if PY3:
            pc_data = cPickle.loads(init_data, encoding='bytes')
        else:
            pc_data = cPickle.loads(init_data)

        play_context = PlayContext()
        play_context.deserialize(pc_data)
        display.verbosity = play_context.verbosity

    except Exception as e:
        rc = 1
        result.update({
            'error': to_text(e),
            'exception': traceback.format_exc()
        })

    if rc == 0:
        ssh = connection_loader.get('ssh', class_only=True)
        ansible_playbook_pid = sys.argv[1]
        cp = ssh._create_control_path(play_context.remote_addr, play_context.port, play_context.remote_user, play_context.connection, ansible_playbook_pid)

        # create the persistent connection dir if need be and create the paths
        # which we will be using later
        tmp_path = unfrackpath(C.PERSISTENT_CONTROL_PATH_DIR)
        makedirs_safe(tmp_path)
        lock_path = unfrackpath("%s/.ansible_pc_lock" % tmp_path)
        socket_path = unfrackpath(cp % dict(directory=tmp_path))

        # if the socket file doesn't exist, spin up the daemon process
        lock_fd = os.open(lock_path, os.O_RDWR | os.O_CREAT, 0o600)
        fcntl.lockf(lock_fd, fcntl.LOCK_EX)

        if not os.path.exists(socket_path):
            messages.append('local domain socket does not exist, starting it')
            original_path = os.getcwd()
            r, w = os.pipe()
            pid = fork_process()

            if pid == 0:
                try:
                    os.close(r)
                    wfd = os.fdopen(w, 'w')
                    process = ConnectionProcess(wfd, play_context, socket_path, original_path, ansible_playbook_pid)
                    process.start()
                except Exception:
                    messages.append(traceback.format_exc())
                    rc = 1

                fcntl.lockf(lock_fd, fcntl.LOCK_UN)
                os.close(lock_fd)
                if rc == 0:
                    process.run()

                sys.exit(rc)

            else:
                os.close(w)
                rfd = os.fdopen(r, 'r')
                data = json.loads(rfd.read())
                messages.extend(data.pop('messages'))
                result.update(data)

        else:
            messages.append('found existing local domain socket, using it!')
            conn = Connection(socket_path)
            pc_data = to_text(init_data)
            try:
                messages.extend(conn.update_play_context(pc_data))
            except Exception as exc:
                # Only network_cli has update_play_context, so missing this is
                # not fatal, e.g. netconf
                if isinstance(exc, ConnectionError) and getattr(exc, 'code', None) == -32601:
                    pass
                else:
                    result.update({
                        'error': to_text(exc),
                        'exception': traceback.format_exc()
                    })

    messages.append(sys.stdout.getvalue())
    result.update({
        'messages': messages,
        'socket_path': socket_path
    })
    sys.stdout = saved_stdout

    if 'exception' in result:
        rc = 1
        sys.stderr.write(json.dumps(result))
    else:
        rc = 0
        sys.stdout.write(json.dumps(result))

    sys.exit(rc)


if __name__ == '__main__':
    display = Display()
    main()

ansible-2.5.1/bin/ansible-console, ansible-doc, ansible-galaxy, ansible-inventory, ansible-playbook, ansible-pull and ansible-vault are symlinks to ansible.

ansible-2.5.1/changelogs/CHANGELOG-legacy.rst:

===============================================
Ansible Release Notes for Legacy Versions < 2.0
===============================================

1.9.7 "Dancing in the Street" - TBD
-----------------------------------

- Fix for lxc\_container backport which was broken because it tried to use a feature from ansible-2.x

1.9.6 "Dancing in the Street" - Apr 15, 2016
--------------------------------------------

- Fix a regression in the loading of inventory variables where they were not found when placed inside of an inventory directory.
- Fix lxc\_container having predictable temp file names. Addresses CVE-2016-3096

1.9.5 "Dancing In the Street" - Mar 21, 2016
--------------------------------------------

- Compatibility fix with docker 1.8.
- Fix a bug with the crypttab module omitting certain characters from the name of the device
- Fix bug with uri module not handling all binary files
- Fix bug with ini\_file not removing options set to an empty string
- Fix bug with script and raw modules not honoring parameters passed via yaml dict syntax
- Fix bug with plugin loading finding the wrong modules because the suffix checking was not ordered
- Fix bug in the literal\_eval module code used when we need python-2.4 compat
- Added --ignore-certs, -c option to ansible-galaxy. Allows ansible-galaxy to work behind a proxy when the proxy fails to forward server certificates.
- Fixed bug where tasks marked no\_log were showing hidden values in output if ansible's --diff option was used.
- Fix bug with non-english locales in git and apt modules
- Compatibility fix for using state=absent with the pip ansible module and pip-6.1.0+
- Backported support for the ansible\_winrm\_server\_cert\_validation flag to disable cert validation on Python 2.7.9+ (and support for other passthru args to the pywinrm transport).
- Backported various updates to the user module (prevent accidental OS X group membership removals, various checkmode fixes).
1.9.4 "Dancing In the Street" - Oct 9, 2015
-------------------------------------------

- Fixes a bug where yum state=latest would error if there were no updates to install.
- Fixes a bug where yum state=latest did not work with wildcard package names.
- Fixes a bug in lineinfile relating to escape sequences.
- Fixes a bug where vars\_prompt was not keeping passwords private by default.
- Fix ansible-galaxy and the hipchat callback plugin to check that the host it is contacting matches its TLS Certificate.

1.9.3 "Dancing In the Street" - Sep 3, 2015
-------------------------------------------

- Fixes a bug related to keyczar messing up encodings internally, resulting in decrypted messages coming out as empty strings.
- AES Keys generated for use in accelerated mode are now 256-bit by default instead of 128.
- Fix url fetching for SNI with python-2.7.9 or greater. SNI does not work with python < 2.7.9. The best workaround is probably to use the command module with curl or wget.
- Fix url fetching to allow tls-1.1 and tls-1.2 if the system's openssl library supports those protocols
- Fix ec2\_ami\_search module to check TLS Certificates
- Fix the following extras modules to check TLS Certificates:

  - campfire
  - layman
  - librato\_annotate
  - twilio
  - typetalk

- Fix docker module's parsing of docker-py version for dev checkouts
- Fix docker module to work with docker server api 1.19
- Change yum module's state=latest feature to update all packages specified in a single transaction. This is the same type of fix as was made for yum's state=installed in 1.9.2; it solves the same problems and carries the same caveats.
- Fixed a bug where stdout from a module might be blank when there were non-printable ASCII characters contained within it

1.9.2 "Dancing In the Street" - Jun 26, 2015
--------------------------------------------

- Security fixes to check that hostnames match certificates with https urls (CVE-2015-3908)

  - get\_url and uri modules
  - url and etcd lookup plugins

- Security fixes to the zone (Solaris containers), jail (bsd containers), and chroot connection plugins. These plugins can be used to connect to their respective container types in lieu of the standard ssh connection. Prior to this fix being applied these connection plugins didn't properly handle symlinks within the containers, which could lead to files intended to be written to or read from the container being written to or read from the host system instead. (CVE pending)
- Fixed a bug in the service module where init scripts were being incorrectly used instead of upstart/systemd.
- Fixed a bug where sudo/su settings were not inherited from ansible.cfg correctly.
- Fixed a bug in the rds module where a traceback may occur due to an unbound variable.
- Fixed a bug on certain remote file systems where the SELinux context was not being properly set.
- Re-enabled several windows modules which had been partially merged (via action plugins):

  - win\_copy.ps1
  - win\_copy.py
  - win\_file.ps1
  - win\_file.py
  - win\_template.py

- Fix bug using with\_sequence and a count that is zero. Also allows counting backwards instead of forwards
- Fix get\_url module bug preventing use of custom ports with https urls
- Fix bug disabling repositories in the yum module.
- Fix giving the yum module a url to install a package from on RHEL/CENTOS5
- Fix bug in dnf module preventing it from working when yum-utils was not already installed

1.9.1 "Dancing In the Street" - Apr 27, 2015
--------------------------------------------

- Fixed a bug related to Kerberos auth when using winrm with a domain account.
- Fixing several bugs in the s3 module.
- Fixed a bug with upstart service detection in the service module.
- Fixed several bugs with the user module when used on OSX.
- Fixed unicode handling in some module situations (assert and shell/command execution).
- Fixed a bug in redhat\_subscription when using the activationkey parameter.
- Fixed a traceback in the gce module on EL6 distros when multiple pycrypto installations are available.
- Added support for PostgreSQL 9.4 in rds\_param\_group
- Several other minor fixes.

1.9 "Dancing In the Street" - Mar 25, 2015
------------------------------------------

Major changes:

- Added kerberos support to winrm connection plugin.
- Tags overhaul: added 'all', 'always', 'untagged' and 'tagged' special tags and normalized tag resolution. Added tag information to --list-tasks and new --list-tags option.
- Privilege escalation generalization: the new 'become' system and variables now handle existing and new methods. Sudo and su have been kept for backwards compatibility. New methods pbrun and pfexec are in 'alpha' state; adding 'runas' for the winrm connection plugin is planned.
- Improved ssh connection error reporting, now you get back the specific message from ssh.
- Added facility to document task module return values for registered vars, both for ansible-doc and the docsite. Documented copy, stats and acl modules, the rest must be updated individually (we will start doing so incrementally).
- Optimize the plugin loader to cache available plugins much more efficiently. For some use cases this can lead to dramatic improvements in startup time.
- Overhaul of the checksum system, now supports more systems and more cases more reliably and uniformly.
- Fix skipped tasks to not display their parameters if no\_log is specified.
- Many fixes to unicode support, standardized functions to make it easier to add to input/output boundaries.
- Added travis integration to github for basic tests, this should speed up ticket triage and merging.
- environment: directive now can also be applied to a play and is inherited by its tasks, which can still override it (see the example after this list).
- expanded facts and OS/distribution support for existing facts and improved performance with pypy.
- new 'wantlist' option to lookups allows for selecting a list typed variable vs a comma delimited string as the return.
- the shared module code for file backups now uses a timestamp resolution of seconds (previously minutes).
- allow for empty inventories, this is now a warning and not an error (for those using localhost and cloud modules).
- sped up YAML parsing in ansible by up to 25% by switching to CParser loader.
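A minimal sketch of the play-level ``environment:`` directive mentioned above; the proxy values are illustrative::

    - hosts: webservers
      environment:
        http_proxy: http://proxy.example.com:8080   # inherited by every task in the play
      tasks:
        - name: sees the play-level proxy
          command: wget -q http://example.com
        - name: overrides it for this task only
          command: wget -q http://example.com
          environment:
            http_proxy: http://other-proxy.example.com:8080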
New Modules:

- crypttab *-- manages linux encrypted block devices*
- gce\_img *-- for utilizing GCE image resources*
- gluster\_volume *-- manage glusterfs volumes*
- haproxy *-- for the load balancer of same name*
- known\_hosts *-- manages the ssh known\_hosts file*
- lxc\_container *-- manage lxc containers*
- patch *-- allows for patching files on target systems*
- pkg5 *-- installing and uninstalling packages on Solaris*
- pkg5\_publisher *-- manages Solaris pkg5 repository configuration*
- postgresql\_ext *-- manage postgresql extensions*
- snmp\_facts *-- gather facts via snmp*
- svc *-- manages daemontools based services*
- uptimerobot *-- manage monitoring with this service*

New Filters:

- ternary: allows for trueval/falseval assignment dependent on conditional (see the examples after this list)
- cartesian: returns the Cartesian product of 2 lists
- to\_uuid: given a string it will return an ansible domain specific UUID
- checksum: uses the ansible internal checksum to return a hash from a string
- hash: get a hash from a string (md5, sha1, etc)
- password\_hash: get a hash from a string that can be used as a password in the user module (and others)
- A whole set of ip/network manipulation filters: ipaddr, ipwrap, ipv4, ipv6, ipsubnet, nthhost, hwaddr, macaddr
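Two of the new filters sketched in tasks; the values are illustrative::

    # ternary: pick one of two values based on a conditional
    - debug: msg="{{ (ansible_os_family == 'Debian') | ternary('apt', 'yum') }}"

    # ipaddr: extract the address portion of a CIDR string
    - debug: msg="{{ '192.0.2.1/24' | ipaddr('address') }}"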
Other Notable Changes:

- New lookup plugins:

  - dig: does dns resolution and returns IPs.
  - url: allows pulling data from a url.

- New callback plugins:

  - syslog\_json: allows logging play output to a syslog network server using json format

- Many new enhancements to the amazon web service modules:

  - ec2 now applies all specified security groups when creating a new instance. Previously it was only applying one
  - ec2\_vol gained the ability to specify the EBS volume type
  - ec2\_vol can now detach volumes by specifying instance=None
  - Fix ec2\_group to purge specific grants rather than whole rules
  - Added tenancy support for the ec2 module
  - rds module has gained the ability to manage tags and set charset and public accessibility
  - ec2\_snapshot module gained the capability to remove snapshots
  - Add alias support for route53
  - Add private\_zones support to route53
  - ec2\_asg: Add wait\_for\_instances parameter that waits until an instance is ready before ending the ansible task

- Many new docker improvements:

  - restart\_policy parameters to configure when the container automatically restarts
  - If the docker client or server doesn't support an option, the task will now fail instead of silently ignoring the option
  - Add insecure\_registry parameter for connecting to registries via http
  - New parameter to set a container's domain name
  - Undeprecated docker\_image module until there's replacement functionality
  - Allow setting the container's pid namespace
  - Add a pull parameter that chooses when ansible will look for more recent images in the registry
  - docker module states have been greatly enhanced. The reworked and new states are:

    - present now creates but does not start containers
    - restarted always restarts a container
    - reloaded restarts a container if ansible detects that the configuration is different than what is specified
    - reloaded accounts for exposed ports, env vars, and volumes

  - Can now connect to the docker server using TLS

- Several source control modules had force parameters that defaulted to true. These have been changed to default to false so as not to accidentally lose work. Playbooks that depended on the former behaviour simply need to add force=True to the task that needs it (see the sketch after this list). Affected modules:

  - bzr: When local modifications exist in a checkout, the bzr module used to default to removing the modifications on any operation. Now the module will not remove the modifications unless force=yes is specified. Operations that depend on a clean working tree may fail unless force=yes is added.
  - git: When local modifications exist in a checkout, the git module will now fail unless force is explicitly specified. Specifying force=yes will allow the module to revert and overwrite local modifications to make git actions succeed.
  - hg: When local modifications exist in a checkout, the hg module used to default to removing the modifications on any operation. Now the module will not remove the modifications unless force=yes is specified.
  - subversion: When updating a checkout with local modifications, you now need to add force=yes so the module will revert the modifications before updating.
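A sketch of the stricter default described above; the repository URL and path are placeholders::

    # force=yes is now required to overwrite local modifications in the checkout
    - git: repo=https://example.com/repo.git dest=/srv/checkout force=yes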
- New inventory scripts:

  - vbox: virtualbox
  - consul: use consul as an inventory source

- gce gained the ip\_forward parameter to forward ip packets
- disk\_auto\_delete parameter to gce that will remove the boot disk after an instance is destroyed
- gce can now spawn instances with no external ip
- gce\_pd gained the ability to choose a disk type
- gce\_net gained target\_tags parameter for creating firewall rules
- rax module has new parameters for making use of a boot volume
- Add scheduler\_hints to the nova\_compute module for optional parameters
- vsphere\_guest now supports deploying guests from a template
- Many fixes for hardlink and softlink handling in file-related modules
- Implement user, group, mode, and selinux parameters for the unarchive module
- authorized\_keys can now use url as a key source
- authorized\_keys has a new exclusive parameter that determines if keys that weren't specified in the task should be removed
- The selinux module now sets the current running state to permissive if state='disabled'
- Can now set accounts to expire via the user module
- Overhaul of the service module to make code simpler and behave better for systems running several popular init systems
- yum module now has a parameter to refresh its cache of package metadata
- apt module gained a build\_dep parameter to install a package's build dependencies
- Add parameters to the postgres modules to specify a unix socket to connect to the db
- The mount module now supports bind mounts
- Add a clone parameter to git module that allows you to get information about a remote repo even if it doesn't exist locally.
- Add a refspec argument to the git module that allows pulling commits that aren't part of a branch
- Many documentation additions and fixes.

1.8.4 "You Really Got Me" - Feb 19, 2015
----------------------------------------

- Fixed regressions in ec2 and mount modules, introduced in 1.8.3

1.8.3 "You Really Got Me" - Feb 17, 2015
----------------------------------------

- Fixing a security bug related to the default permissions set on a temporary file created when using "ansible-vault view <filename>".
- Many bug fixes, for both core code and core modules.

1.8.2 "You Really Got Me" - Dec 04, 2014
----------------------------------------

- Various bug fixes for packaging issues related to modules.
- Various bug fixes for lookup plugins.
- Various bug fixes for some modules (continued cleanup of postgresql issues, etc.).
- Add a clone parameter to git module that allows you to get information about a remote repo even if it doesn't exist locally.

1.8.1 "You Really Got Me" - Nov 26, 2014
----------------------------------------

- Various bug fixes in postgresql and mysql modules.
- Fixed a bug related to lookup plugins used within roles not finding files based on the relative paths to the roles files/ directory.
- Fixed a bug related to vars specified in plays being templated too early, resulting in incorrect variable interpolation.
- Fixed a bug related to git submodules in bare repos.

1.8 "You Really Got Me" - Nov 25, 2014
--------------------------------------

Major changes:

- fact caching support, pluggable, initially supports Redis (DOCS pending)
- 'serial' size in a rolling update can be specified as a percentage (see the sketch after this list)
- added new Jinja2 filters, 'min' and 'max' that take lists
- new 'ansible\_version' variable contains a dictionary of version info
- For ec2 dynamic inventory, ec2.ini has various new configuration options
- 'ansible vault view filename.yml' opens filename.yml decrypted in a pager.
- no\_log parameter now suppresses data from callbacks/output as well as syslog
- ansible-galaxy install -f requirements.yml allows advanced options and installs from non-galaxy SCM sources and tarballs.
- command\_warnings feature will warn when usage of the shell/command module can be simplified to use core modules - this can be enabled in ansible.cfg
- new omit value can be used to leave off a parameter when not set, like so module\_name: a=1 b={{ c \| default(omit) }}, would not pass value for b (not even an empty value) if c was not set.
- developers: 'baby JSON' in module responses, originally intended for writing modules in bash, is removed as a feature to simplify logic, script module remains available for running bash scripts.
- async jobs started in "fire & forget" mode can now be checked on at a later time.
- added ability to subcategorize modules for docs.ansible.com
- added ability for shipped modules to have aliases with symlinks
- added ability to deprecate older modules by starting with "\_" and including "deprecated: message why" in module docs
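A sketch combining two of the features above, percentage-based ``serial`` and the ``omit`` placeholder; the group and variable names are hypothetical::

    - hosts: webservers
      serial: "30%"        # rolling update over 30% of the group at a time
      tasks:
        - user:
            name: deploy
            comment: "{{ deploy_comment | default(omit) }}"   # parameter left off entirely if unset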
New Modules:

- cloud

  - rax\_cdb *-- manages Rackspace Cloud Database instances*
  - rax\_cdb\_database *-- manages Rackspace Cloud Databases*
  - rax\_cdb\_user *-- manages Rackspace Cloud Database users*

- monitoring

  - bigpanda *-- support for bigpanda*
  - zabbix\_maintenance *-- handles outage windows with Zabbix*

- net\_infrastructure

  - a10\_server *-- manages server objects on A10 devices*
  - a10\_service\_group *-- manages service group objects on A10 devices*
  - a10\_virtual\_server *-- manages virtual server objects on A10 devices*

- system

  - getent *-- read getent databases*

Some other notable changes:

- added the ability to set "instance filters" in the ec2.ini to limit results from the inventory plugin.
- upgrades for various variable precedence items and parsing related items
- added a new "follow" parameter to the file and copy modules, which allows actions to be taken on the target of a symlink rather than the symlink itself.
- if a module should ever traceback, it will return a standard error, catchable by ignore\_errors, versus an 'unreachable'
- ec2\_lc: added support for multiple new parameters like kernel\_id, ramdisk\_id and ebs\_optimized.
- ec2\_elb\_lb: added support for the connection\_draining\_timeout and cross\_az\_load\_balancing options.
- support for symbolic representations (ie. u+rw) for file permission modes (file/copy/template modules etc.).
- docker: Added support for specifying the net type of the container.
- docker: support for specifying read-only volumes.
- docker: support for specifying the API version to use for the remote connection.
- openstack modules: various improvements
- irc: ssl support for the notification module
- npm: fix flags passed to package installation
- windows: improved error handling
- setup: additional facts on System Z
- apt\_repository: certificate validation can be disabled if requested
- pagerduty module: misc improvements
- ec2\_lc: public\_ip boolean configurable in launch configurations
- ec2\_asg: fixes related to proper termination of an autoscaling group
- win\_setup: total memory fact correction
- ec2\_vol: ability to list existing volumes
- ec2: can set optimized flag
- various parser improvements
- produce a friendly error message if the SSH key is too permissive
- ec2\_ami\_search: support for SSD and IOPS provisioned EBS images
- can set ansible\_sudo\_exe as an inventory variable which allows specifying a different sudo (or equivalent) command
- git module: Submodule handling has changed. Previously if you used the ``recursive`` parameter to handle submodules, ansible would track the submodule upstream's head revision. This has been changed to checkout the version of the submodule specified in the superproject's git repository. This is in line with what git submodule update does. If you want the old behaviour use the new module parameter track\_submodules=yes
- Checksumming of transferred files has been made more portable and now uses the sha1 algorithm instead of md5 to be compatible with FIPS-140.
- As a small side effect, the fetch module no longer returns a useful value in remote\_md5. If you need a replacement, switch to using remote\_checksum which returns the sha1sum of the remote file.
- ansible-doc CLI tool contains various improvements for working with different terminals

And various other bug fixes and improvements ...

1.7.2 "Summer Nights" - Sep 24, 2014
------------------------------------

- Fixes a bug in accelerate mode which caused a traceback when trying to use that connection method.
- Fixes a bug in vault where the password file option was not being used correctly internally.
- Improved multi-line parsing when using YAML literal blocks (using > or \|).
- Fixed a bug with the file module and the creation of relative symlinks.
- Fixed a bug where checkmode was not being honoured during the templating of files.
- Other various bug fixes.

1.7.1 "Summer Nights" - Aug 14, 2014
------------------------------------

- Security fix to disallow specifying 'args:' as a string, which could allow the insertion of extra module parameters through variables.
- Performance enhancements related to previous security fixes, which could cause slowness when modules returned very large JSON results. This specifically impacted the unarchive module frequently, which returns the details of all unarchived files in the result.
- Docker module bug fixes:

  - Fixed support for specifying rw/ro bind modes for volumes
  - Fixed support for allowing the tag in the image parameter

- Various other bug fixes

1.7 "Summer Nights" - Aug 06, 2014
----------------------------------

Major new features:

- Windows support (alpha) using native PowerShell remoting
- Tasks can now specify ``run_once: true``, meaning they will be executed exactly once. This can be combined with delegate\_to to trigger actions you want done just the one time versus for every host in inventory (see the sketch below).
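``run_once`` combined with ``delegate_to``, sketched with an illustrative host and command::

    - name: run the schema migration once for the whole play
      command: /usr/local/bin/migrate-db
      run_once: true
      delegate_to: db1.example.com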
New inventory scripts:

- SoftLayer
- Windows Azure

New Modules:

- cloud

  - azure
  - rax\_meta
  - rax\_scaling\_group
  - rax\_scaling\_policy

- windows

  - *version of setup module*
  - *version of slurp module*
  - win\_feature
  - win\_get\_url
  - win\_group
  - win\_msi
  - win\_ping
  - win\_service
  - win\_user

Other notable changes:

- Security fixes

  - Prevent the use of lookups when using legacy "{{ }}" syntax around variables and with\_\* loops.
  - Remove relative paths in TAR-archived file names used by ansible-galaxy.

- Inventory speed improvements for very large inventories.
- Vault password files can now be executable, to support scripts that fetch the vault password.

1.6.10 "And the Cradle Will Rock" - Jul 25, 2014
------------------------------------------------

- Fixes an issue with the copy module when copying a directory that fails when changing file attributes and the target file already exists
- Improved unicode handling when splitting args

1.6.9 "And the Cradle Will Rock" - Jul 24, 2014
-----------------------------------------------

- Further improvements to module parameter parsing to address additional regressions caused by security fixes

1.6.8 "And the Cradle Will Rock" - Jul 22, 2014
-----------------------------------------------

- Corrects a regression in the way shell and command parameters were being parsed

1.6.7 "And the Cradle Will Rock" - Jul 21, 2014
-----------------------------------------------

- Security fixes:

  - Strip lookup calls out of inventory variables and clean unsafe data returned from lookup plugins (CVE-2014-4966)
  - Make sure vars don't insert extra parameters into module args and prevent duplicate params from superseding previous params (CVE-2014-4967)

1.6.6 "And the Cradle Will Rock" - Jul 01, 2014
-----------------------------------------------

- Security updates to further protect against the incorrect execution of untrusted data

1.6.4, 1.6.5 "And the Cradle Will Rock" - Jun 25, 2014
------------------------------------------------------

- Security updates related to evaluation of untrusted remote inputs

1.6.3 "And the Cradle Will Rock" - Jun 09, 2014
-----------------------------------------------

- Corrects a regression where handlers were run across all hosts, not just those that triggered the handler.
- Fixed a bug in which modules did not support properly moving a file atomically when su was in use.
- Fixed two bugs related to symlinks with directories when using the file module.
- Fixed a bug related to MySQL master replication syntax.
- Corrects a regression in the order of variable merging done by the internal runner code.
- Various other minor bug fixes.

1.6.2 "And the Cradle Will Rock" - May 23, 2014
-----------------------------------------------

- If an improper locale is specified, core modules will now automatically revert to using the 'C' locale.
- Modules using the fetch\_url utility will now obey proxy environment variables.
- The SSL validation step in fetch\_url will likewise obey proxy settings, however only proxies using the http protocol are supported.
- Fixed multiple bugs in docker module related to version changes upstream.
- Fixed a bug in the ec2\_group module where egress rules were lost when a VPC was specified.
- Fixed two bugs in the synchronize module:

  - a trailing slash might be lost when calculating relative paths, resulting in an incorrect destination.
  - the sync might use the inventory directory incorrectly instead of the playbook or role directory.

- Files will now only be chown'd on an atomic move if the src/dest uid/gid do not match.
1.6.1 "And the Cradle Will Rock" - May 7, 2014
----------------------------------------------

- Fixed a bug in group\_by, where systems were being grouped incorrectly.
- Fixed a bug where file descriptors may leak to a child process when using accelerate.
- Fixed a bug in apt\_repository triggered when python-apt was not installed/available.
- Fixed a bug in the apache2\_module module, where modules were not being disabled correctly.

1.6 "And the Cradle Will Rock" - May 5, 2014
--------------------------------------------

Major features/changes:

- The deprecated legacy variable templating system has finally been removed. Always use {{ foo }}, not $foo or ${foo}.
- Any data file can also be JSON. Use sparingly -- with great power comes great responsibility. Starting a file with "{" or "[" denotes JSON (see the example after this list).
- Added 'gathering' param for ansible.cfg to change the default gather\_facts policy.
- Accelerate improvements:

  - multiple users can connect with different keys, when ``accelerate_multi_key = yes`` is specified in the ansible.cfg.
  - daemon lifetime is now based on the time from the last activity, not the time from the daemon's launch.

- ansible-playbook now accepts --force-handlers to run handlers even if tasks result in failures.
- Added VMware support with the vsphere\_guest module.
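As noted above, a data file may now be JSON; a tiny hypothetical group_vars/webservers file (JSON is also valid YAML)::

    {
      "http_port": 8080,
      "max_clients": 200
    }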
New Modules:

- files

  - replace

- packaging

  - apt\_rpm
  - composer *(PHP)*
  - cpanm *(Perl)*
  - homebrew\_cask *(OS X)*
  - homebrew\_tap *(OS X)*
  - layman
  - portage

- monitoring

  - librato\_annotation
  - logentries
  - rollbar\_deployment

- notification

  - nexmo *(SMS)*
  - slack *(Slack.com)*
  - sns *(Amazon)*
  - twilio *(SMS)*
  - typetalk *(Typetalk.in)*

- system

  - alternatives
  - capabilities
  - debconf
  - locale\_gen
  - ufw

- net\_infrastructure

  - bigip\_facts
  - dnssimple
  - lldp

- web\_infrastructure

  - apache2\_module

- cloud

  - digital\_ocean\_domain
  - digital\_ocean\_sshkey
  - ec2\_asg *(configure autoscaling groups)*
  - ec2\_metric\_alarm
  - ec2\_scaling\_policy
  - rax\_identity
  - rax\_cbs *(cloud block storage)*
  - rax\_cbs\_attachments
  - vsphere\_guest

Other notable changes:

- example callback plugin added for hipchat
- added example inventory plugin for vcenter/vsphere
- added example inventory plugin for doing really trivial inventory from SSH config files
- libvirt module now supports destroyed and paused as states
- s3 module can specify metadata
- security token additions to ec2 modules
- setup module code moved into module\_utils/, facts now accessible by other modules
- synchronize module sets relative dirs based on inventory or role path
- misc bugfixes and other parameters
- the ec2\_key module now has wait/wait\_timeout parameters
- added version\_compare filter (see docs)
- added ability for module documentation YAML to utilize shared module snippets for common args
- apt module now accepts "deb" parameter to install local dpkg files
- regex\_replace filter plugin added
- added an inventory script for Docker
- added an inventory script for Abiquo
- the get\_url module now accepts url\_username and url\_password as parameters, so sites which require authentication no longer need to have them embedded in the url
- ... to be filled in from changelogs ...

1.5.5 "Love Walks In" - April 18, 2014
--------------------------------------

- Security fix for vault, to ensure the umask is set to a restrictive mode before creating/editing vault files.
- Backported apt\_repository security fixes relating to filename/mode upon sources list file creation.

1.5.4 "Love Walks In" - April 1, 2014
-------------------------------------

- Security fix for safe\_eval, which further hardens the checking of the evaluation function.
- Changing order of variable precedence for system facts, to ensure that inventory variables take precedence over any facts that may be set on a host.

1.5.3 "Love Walks In" - March 13, 2014
--------------------------------------

- Fix validate\_certs and run\_command errors from previous release
- Fixes to the git module related to host key checking

1.5.2 "Love Walks In" - March 11, 2014
--------------------------------------

- Fix module errors in airbrake and apt from previous release

1.5.1 "Love Walks In" - March 10, 2014
--------------------------------------

- Force command action to not be executed by the shell unless specifically enabled.
- Validate SSL certs accessed through urllib\*.
- Implement new default cipher class AES256 in ansible-vault.
- Misc bug fixes.

1.5 "Love Walks In" - February 28, 2014
---------------------------------------

Major features/changes:

- when\_foo which was previously deprecated is now removed, use "when:" instead. Code generates appropriate error suggestion.
- include + with\_items which was previously deprecated is now removed, ditto. Use with\_nested / with\_together, etc.
- only\_if, which is much older than when\_foo and was deprecated, is similarly removed.
- ssh connection plugin is now more efficient if you add 'pipelining=True' in ansible.cfg under [ssh\_connection], see example.cfg
- localhost/127.0.0.1 is not required to be in inventory if referenced; if not in inventory, it does not implicitly appear in the 'all' group.
- git module has new parameters (accept\_hostkey, key\_file, ssh\_opts) to ease the usage of git and ssh protocols.
- when using accelerate mode, the daemon will now be restarted when specifying a different remote\_user between plays.
- added no\_log: option for tasks. When used, no logging information will be sent to syslog during the module execution (see the sketch after this list).
- acl module now handles 'default' and allows for either shorthand entry or specific fields per entry section
- play\_hosts is a new magic variable to provide a list of hosts in scope for the current play.
- ec2 module now accepts 'exact\_count' and 'count\_tag' as a way to enforce a running number of nodes by tags.
- all ec2 modules that work with Eucalyptus also now support a 'validate\_certs' option, which can be set to 'off' for installations using self-signed certs.
- Start of new integration test infrastructure (WIP, more details TBD)
- if repoquery is unavailable, the yum module will automatically attempt to install yum-utils
- ansible-vault: a framework for encrypting your playbooks and variable files
- added support for privilege escalation via 'su' into bin/ansible and bin/ansible-playbook and associated keywords 'su', 'su\_user', 'su\_pass' for tasks/plays
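A sketch of the new per-task ``no_log:`` option; the command and variable are illustrative::

    - name: rotate an API key without recording the secret
      command: /usr/local/bin/rotate-key --secret {{ api_secret }}
      no_log: true    # suppress syslog logging of this task's arguments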
New modules:

- cloud

  - docker\_image
  - ec2\_elb\_lb
  - ec2\_key
  - ec2\_snapshot
  - rax\_dns
  - rax\_dns\_record
  - rax\_files
  - rax\_files\_objects
  - rax\_keypair
  - rax\_queue

- messaging

  - rabbitmq\_policy

- system

  - at

- utilities

  - assert

Other notable changes (many new module params & bugfixes may not be listed):

- no\_reboot is now defaulted to "no" in the ec2\_ami module to ensure filesystem consistency in the resulting AMI.
- sysctl module overhauled
- authorized\_key module overhauled
- synchronize module now handles local transport better
- apt\_key module now ignores case on keys
- zypper\_repository now skips on check mode
- file module now responds to force behavior when dealing with hardlinks
- new lookup plugin 'csvfile'
- fixes to allow hash\_merge behavior to work with dynamic inventory
- mysql module will use port argument on dump/import
- subversion module now ignores locale to better intercept status messages
- rax api\_key argument is no longer logged
- backwards/forwards compatibility for OpenStack modules, 'quantum' modules grok neutron renaming
- hosts properly uniquified if appearing in redundant groups
- hostname module support added for ScientificLinux
- ansible-pull can now show live stdout and pass verbosity levels to ansible-playbook
- ec2 instances can now be stopped or started
- additional volumes can be created when creating new ec2 instances
- user module can move a home directory
- significant enhancement and cleanup of rackspace modules
- ansible\_ssh\_private\_key\_file can be templated
- docker module updated to support docker-py 0.3.0
- various other bug fixes
- md5 logic improved during sudo operation
- support for ed25519 keys in authorized\_key module
- ability to set directory permissions during a recursive copy (directory\_mode parameter)

1.4.5 "Could This Be Magic" - February 12, 2014
-----------------------------------------------

- fixed issue with permissions being incorrect on fireball/accelerate keys when the umask setting was too loose.

1.4.4 "Could This Be Magic" - January 6, 2014
---------------------------------------------

- fixed a minor issue with newer versions of pip dropping the "use-mirrors" parameter.

1.4.3 "Could This Be Magic" - December 20, 2013
-----------------------------------------------

- Fixed role\_path parsing from ansible.cfg
- Fixed default role templates

1.4.2 "Could This Be Magic" - December 18, 2013
-----------------------------------------------

- Fixed a few bugs related to unicode
- Fixed errors in the ssh connection method with large data returns
- Miscellaneous fixes for a few modules
- Add the ansible-galaxy command

1.4.1 "Could This Be Magic" - November 27, 2013
-----------------------------------------------

- Misc fixes to accelerate mode and various modules.

1.4 "Could This Be Magic" - November 21, 2013
---------------------------------------------

Highlighted new features:

- Added do-until feature, which can be used to retry a failed task a specified number of times with a delay in-between the retries (see the sketch after this list).
- Added failed\_when option for tasks, which can be used to specify logical statements that make it easier to determine when a task has failed, or to make it easier to ignore certain non-zero return codes for some commands.
- Added the "subelement" lookup plugin, which allows iteration of the keys of a dictionary or items in a list.
- Added the capability to use either paramiko or ssh for the initial setup connection of an accelerated playbook.
- Automatically provide advice on common parser errors users encounter.
- Deprecation warnings are now shown for legacy features: when\_integer/etc, only\_if, include+with\_items, etc. Can be disabled in ansible.cfg
- The system will now provide helpful tips around possible YAML syntax errors increasing ease of use for new users.
- warnings are now shown for using {{ foo }} in loops and conditionals, and suggest leaving the variable expressions bare as per docs.
- The roles search path is now configurable in ansible.cfg via the 'roles\_path' config setting.
- Includes with parameters can now be done like roles for consistency: - { include: song.yml, year:1984, song:'jump' }
- The name of each role is now shown before each task if roles are being used
- Adds a "var=" option to the debug module for debugging variable data. "debug: var=hostvars['hostname']" and "debug: var=foo" are both valid syntax.
- Variables in {{ format }} can be used as references even if they are structured data
- Can force binding of accelerate to ipv6 ports.
- the apt module will auto-install python-apt if not present rather than requiring a manual installation
- the copy module is now recursive if the local 'src' parameter is a directory.
- syntax checks now scan included task and variable files as well as main files
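The do-until retry loop and ``failed_when``, sketched with illustrative commands::

    # retry up to 5 times, 10 seconds apart, until the service reports "up"
    - command: /usr/bin/check-service
      register: result
      until: result.stdout.find("up") != -1
      retries: 5
      delay: 10

    # treat the task as failed only when stderr contains FATAL
    - command: /usr/bin/flaky-tool
      register: out
      failed_when: "'FATAL' in out.stderr"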
New modules and plugins:

- cloud

  - docker *-- instantiates/removes/manages docker containers*
  - ec2\_eip *-- manage AWS elastic IPs*
  - ec2\_vpc *-- manage ec2 virtual private clouds*
  - elasticache *-- Manages clusters in Amazon ElastiCache*
  - ovirt *-- VM lifecycle controls for ovirt*
  - rax\_network *-- sets up Rackspace networks*
  - rax\_facts *-- retrieve facts about a Rackspace Cloud Server*
  - rax\_clb\_nodes *-- manage Rackspace cloud load balanced nodes*
  - rax\_clb *-- manages Rackspace cloud load balancers*

- files

  - acl *-- set or get acls on a file*
  - synchronize *-- a useful wrapper around rsyncing trees of files*
  - unarchive *-- pushes and extracts tarballs*

- system

  - blacklist *-- add or remove modules from the kernel blacklist*
  - firewalld *-- manage the firewalld configuration*
  - hostname *-- sets the systems hostname*
  - modprobe *-- manage kernel modules on systems that support modprobe/rmmod*
  - open\_iscsi *-- manage targets on an initiator using open-iscsi*

- utilities

  - include\_vars *-- dynamically load variables based on conditions.*

- packaging

  - swdepot *-- a module for working with swdepot*
  - urpmi *-- work with urpmi packages*
  - zypper\_repository *-- adds or removes Zypper repositories*

- notification

  - grove *-- notifies to Grove hosted IRC channels*

- web\_infrastructure

  - ejabberd\_user *-- add and remove users to ejabberd*
  - jboss *-- deploys or undeploys apps to jboss*

- source\_control

  - github\_hooks *-- manages GitHub service hooks*

- net\_infrastructure

  - bigip\_monitor\_http *-- manages F5 BIG-IP LTM http monitors*
  - bigip\_monitor\_tcp *-- manages F5 BIG-IP LTM TCP monitors*
  - bigip\_node *-- manages F5 BIG-IP LTM nodes*
  - bigip\_pool\_member *-- manages F5 BIG-IP LTM pool members*
  - openvswitch\_port
  - openvswitch\_bridge

Plugins:

- jail connection module (FreeBSD)
- lxc connection module
- added inventory script for listing FreeBSD jails
- added md5 as a Jinja2 filter: {{ path \| md5 }}
- added a fileglob filter that will return files matching a glob pattern. with\_items: "/foo/pattern/\*.txt \| fileglob"
- 'changed' filter returns whether a previous step was changed easier. when: registered\_result \| changed
- DOCS NEEDED: 'unique' and 'intersect' filters are added for dealing with lists.
- DOCS NEEDED: new lookup plugin added for etcd
- a 'func' connection type to help people migrating from func/certmaster.

Misc changes (all module additions/fixes may not be listed):

- (docs pending) New features for accelerate mode: configurable timeouts and keepalives for long-running tasks.
- Added a ``delimiter`` field to the assemble module.
- Added ``ansible_env`` to the list of facts returned by the setup module.
- Added ``state=touch`` to the file module, which functions similarly to the command-line version of ``touch``.
- Added a -vvvv level, which will show SSH client debugging information in the event of a failure.
- Includes now support the more standard syntax, similar to that of role includes and dependencies.
- Changed the ``user:`` parameter on plays to ``remote_user:`` to prevent confusion with the module of the same name. Still backwards compatible on play parameters.
- Added parameter to allow the fetch module to skip the md5 validation step ('validate\_md5=false'). This is useful when fetching files that are actively being written to, such as live log files.
- Inventory hosts are used in the order they appear in the inventory.
- in hosts: foo[2-5] type syntax, the iterators now are zero indexed and the last index is non-inclusive, to match Python standards (see the sketch after this list).
- There is now a way for a callback plugin to disable itself. See osx\_say example code for an example.
- Many bugfixes to modules of all types.
- Complex arguments now can be used with async tasks
- SSH ControlPath is now configurable in ansible.cfg. There is a limit to the lengths of these paths, see how to shorten them in ansible.cfg.
- md5sum support on AIX with csum.
- Extremely large documentation refactor into subchapters
- Added 'append\_privs' option to the mysql\_user module
- Can now update (temporarily change) host variables using the "add\_host" module for existing hosts.
- Fixes for IPv6 addresses in inventory text files
- name of executable can be passed to pip/gem etc, for installing under *different* interpreters
- copy of ./hacking/env-setup added for fish users, ./hacking/env-setup.fish
- file module more tolerant of non-absolute paths in softlinks.
- miscellaneous fixes/upgrades to async polling logic.
- conditions on roles now pass to dependent roles
- ansible\_sudo\_pass can be set in a host variable if desired
- misc fixes for the pip and easy\_install modules
- support for running handlers that have parameterized names based on role parameters
- added support for compressing MySQL dumps and extracting during import
- Boto version compatibility fixes for the EC2 inventory script
- in the EC2 inventory script, a group 'EC2' and 'RDS' contains EC2 and RDS hosts.
- umask is enforced by the cron module
- apt packages that are not-removed and not-upgraded do not count as changes
- the assemble module can now use src files from the local server and copy them over dynamically
- authorization code has been standardized between Amazon cloud modules
- the wait\_for module can now also wait for files to exist or a regex string to exist in a file
- leading ranges are now allowed in ranged hostname patterns, ex: [000-250].example.com
- pager support added to ansible-doc (so it will auto-invoke less, etc)
- misc fixes to the cron module
- get\_url module now understands content-disposition headers for deciding filenames
- it is possible to have subdirectories in between group\_vars/ and host\_vars/ and the final filename, like host\_vars/rack42/asdf for the variables for host 'asdf'. The intermediate directories are ignored; do not put a file in there twice.
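The zero-indexed, non-inclusive host slicing mentioned above, sketched with a hypothetical group name::

    - hosts: webservers[0-2]   # the first two hosts in the group (indexes 0 and 1; the end index is excluded)
      tasks:
        - ping: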
1.3.4 "Top of the World" (reprise) - October 29, 2013
-----------------------------------------------------

- Fixed a bug in the copy module, where a filename containing the string "raw" was handled incorrectly
- Fixed a bug in accelerate mode, where copying a zero-length file out would fail

1.3.3 "Top of the World" (reprise) - October 9, 2013
----------------------------------------------------

Additional fixes for accelerate mode.

1.3.2 "Top of the World" (reprise) - September 19th, 2013
---------------------------------------------------------

Multiple accelerate mode fixes:

- Make packet reception less greedy, so multiple frames of data are not consumed by one call.
- Added two timeout values (one for connection and one for data reception timeout).
- Added keepalive packets, so async mode is no longer required for long-running tasks.
- Modified accelerate daemon to use the verbose logging level of the ansible command that started it.
- Fixed bug where accelerate would not work in check-mode.
- Added a -vvvv level, which will show SSH client debugging information in the event of a failure.
- Fixed bug in apt\_repository module where the repository cache was not being updated.
- Fixed bug where "too many open files" errors would be encountered due to pseudo TTY's not being closed properly.

1.3.1 "Top of the World" (reprise) - September 16th, 2013
---------------------------------------------------------

Fixing a bug in accelerate mode whereby the gather\_facts step would always be run via sudo regardless of the play settings.

1.3 "Top of the World" - September 13th, 2013
---------------------------------------------

Highlighted new features:

- accelerated mode: An enhanced fireball mode that requires zero bootstrapping and fewer requirements plus adds capabilities like sudo commands.
- role defaults: Allows roles to define a set of variables at the lowest priority. These variables can be overridden by any other variable.
- new /etc/ansible/facts.d allows JSON or INI-style facts to be provided from the remote node, and supports executable fact programs in this dir. Files must end in \*.fact.
- added the ability to make undefined template variables raise errors (see ansible.cfg)
- (DOCS PENDING) sudo: True/False and sudo\_user: True/False can be set at include and role level
- added changed\_when: (expression) which allows overriding whether a result is changed or not and can work with registered expressions (see the sketch after this list)
- --extra-vars can now take a file as input, e.g., "-e @filename" and can also be formatted as YAML
- external inventory scripts may now return host variables in one pass, which allows them to be much more efficient for large numbers of hosts
- if --forks exceeds the number of hosts, it will be automatically reduced. Set forks to 0 and you get "as many forks as I have hosts" out of the box.
- enabled error\_on\_undefined\_vars by default, which will make errors in playbooks more obvious
- role dependencies -- one role can now pull in another, with parameters of its own.
- added the ability to have tasks execute even during a check run (always\_run).
- added the ability to set the maximum failure percentage for a group of hosts.
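A sketch of ``changed_when`` with a registered result, plus the new ``-e @file`` invocation; all names are illustrative::

    # ansible-playbook site.yml -e @extra_vars.yml

    - command: /usr/local/bin/sync-config
      register: result
      changed_when: "'updated' in result.stdout"   # report changed only when the tool says so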
New modules:

- notifications
   - datadog\_event *-- send data to datadog*
- cloud
   - digital\_ocean *-- module for DigitalOcean provisioning that also includes inventory support*
   - rds *-- Amazon Relational Database Service*
   - linode *-- modules for Linode provisioning that also includes inventory support*
   - route53 *-- manage Amazon DNS entries*
   - ec2\_ami *-- manages (and creates!) ec2 AMIs*
- database
   - mysql\_replication *-- manages mysql replication settings for masters/slaves*
   - mysql\_variables *-- manages mysql runtime variables*
   - redis *-- manages redis databases (slave mode and flushing data)*
- net\_infrastructure
   - arista\_interface
   - arista\_l2interface
   - arista\_lag
   - arista\_vlan
   - dnsmadeeasy *-- manipulate DNS Made Easy records*
- system
   - stat *-- reports on stat(istics) of remote files, for use with 'register'*
- web\_infrastructure
   - htpasswd *-- manipulate htpasswd files*
- packaging
   - apt\_repository *-- rewritten to remove dependencies*
   - rpm\_key *-- adds or removes RPM signing keys*
- monitoring
   - boundary\_meter *-- adds or removes boundary.com meters*
- files
   - xattr *-- manages extended attributes on files*

Misc changes:

- return 3 when there are hosts that were unreachable during a run
- the yum module now supports wildcard values for the enablerepo argument
- added an inventory script to pull host information from Zabbix
- async mode no longer allows with\_\* lookup plugins due to incompatibilities
- Added OpenRC support (Gentoo) to the service module
- ansible\_ssh\_user value is available to templates
- added placement\_group parameter to ec2 module
- new sha256sum parameter added to get\_url module for checksum validation
- search for mount binaries in system path and sbin vs assuming path
- allowed inventory file to be read from a pipe
- added Solaris distribution facts
- fixed bug along error path in quantum\_network module
- user password update mode is controllable in user module now (at creation vs. every time)
- added check mode support to the OpenBSD package module
- Fix for MySQL 5.6 compatibility
- HP UX virtualization facts
- fixed some executable bits in git
- made rhn\_register module compatible with EL5
- fix for setup module epoch time on Solaris
- sudo\_user is now expanded later, allowing it to be set at inventory scope
- mongodb\_user module changed to also support MongoDB 2.2
- new state=hard option added to the file module for hardlinks vs softlinks
- fixes to apt module purging option behavior
- fixes for device facts with multiple PCI domains
- added "with\_inventory\_hostnames" lookup plugin, which can take a pattern and loop over hostnames matching the pattern and is great for use with delegate\_to and so on
- ec2 module supports adding to multiple security groups
- cloudformation module includes fixes for the error path, and the 'wait\_for' parameter was removed
- added --only-if-changed to ansible-pull, which runs only if the repo has changes (not default)
- added 'mandatory', a Jinja2 filter that checks if a variable is defined: {{ foo\|mandatory }}
- added support for multiple size formats to the lvol module
- timing reporting on wait\_for module now includes the delay time
- IRC module can now send a server password
- "~" now expanded on each component of configured plugin paths
- fix for easy\_install module when dealing with virtualenv
- rackspace module now explicitly indicates rackspace vs vanilla openstack
- add\_host module does not report changed=True any longer
- explanatory error message when using fireball with sudo has been improved
- git module now automatically pulls down git submodules
- negated patterns do not require "all:!foo", you can just say "!foo" now to select all not foos
- fix for Debian services always reporting changed when toggling enablement bit
- roles files now tolerate files named 'main.yaml' and 'main' in addition to main.yml
- some help cleanup to command line flags on scripts
- force option reinstated for file module so it can create symlinks to non-existent files, etc.
- added termination support to ec2 module
- --ask-sudo-pass or --sudo-user does not enable all options to use sudo in ansible-playbook
- include/role conditionals are added ahead of task conditionals so they can short circuit properly
- added pipes.quote in various places so paths with spaces are better tolerated
- error handling while executing Jinja2 filters has been improved
- upgrades to atomic replacement logic when copying files across partitions/etc
- mysql user module can try to login before requiring explicit password
- various additional options added to supervisorctl module
- only add non unique parameter on group creation when required
- allow rabbitmq\_plugin to specify a non-standard RabbitMQ path
- authentication fixes to keystone\_user module
- added IAM role support to EC2 module
- fixes for OpenBSD package module to avoid shell expansion
- git module upgrades to allow --depth and --version to be used together
- new lookup plugin, "with\_flattened" (see the sketch after this list)
- extra vars (-e) variables can be used in playbook include paths
- improved reporting for invalid sudo passwords
- improved reporting for inability to find a suitable tmp location
- require libselinux-python to perform file operations if SELinux is operational
- ZFS module fixes for byte display constants and handling paths with spaces
- setup module more tolerant of gathering facts against things it does not have permission to read
- can specify name=\* state=latest to update all yum packages
- major speedups to the yum module for default cases
- ec2\_facts module will now run in check mode
- sleep option on service module for sleeping between stop/restart
- fix for IPv6 facts on BSD
- added Jinja2 filters: skipped, whether a result was skipped
- added Jinja2 filters: quote, quotes a string if it needs to be quoted
- allow force=yes to affect apt upgrades
- fix for saving conditionals in variable names
- support for multiple host ranges in INI inventory, e.g., db[01:10:3]node-[01:10]
- fixes/improvements to cron module
- add user\_install=no option to gem module to install gems system wide
- added raw=yes to allow copying without python on remote machines
- added with\_indexed\_items lookup plugin
- Linode inventory plugin now significantly faster
- added recurse=yes parameter to pacman module for package removal
- apt\_key module can now target specific keyrings (keyring=filename)
- ec2 module change reporting improved
- hg module now expands user paths (~)
- SSH connection type known host checking now can process hashed known\_host files
- lvg module now checks for executables in more correct locations
- copy module now works correctly with sudo\_user
- region parameter added to ec2\_elb module
- better default XMPP module message types
- fixed conditional tests against raw booleans
- mysql module grant removal is now smarter
- apt-remove is now forced to be non-interactive
- support ; comments in INI file module
- fixes to callbacks WRT async output (fire and forget tasks now trigger callbacks!)
- folder support for s3 module
- added new example inventory plugin for Red Hat OpenShift
- and other misc. bugfixes
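A hedged sketch of the new "with\_flattened" lookup (the variable and package names are illustrative):

.. code:: yaml

    vars:
      base_packages:
        - [ 'git', 'tig' ]
        - [ 'tmux' ]
    tasks:
      # the nested lists are flattened into one sequence of items
      - yum: name={{ item }} state=installed
        with_flattened:
          - base_packages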
1.2.2 "Hear About It Later" (reprise) -- July 4, 2013 ----------------------------------------------------- - Added a configuration file option [paramiko\_connection] record\_host\_keys which allows the code that paramiko uses to update known\_hosts to be disabled. This is done because paramiko can be very slow at doing this if you have a large number of hosts and some folks may not want this behavior. This can be toggled independently of host key checking and does not affect the ssh transport plugin. Use of the ssh transport plugin is preferred if you have ControlPersist capability, and Ansible by default in 1.2.1 and later will autodetect. 1.2.1 "Hear About It Later" -- July 4, 2013 ------------------------------------------- - Connection default is now "smart", which discovers if the system openssh can support ControlPersist, and uses it if so, if not falls back to paramiko. - Host key checking is on by default. Disable it if you like by adding host\_key\_checking=False in the [default] section of /etc/ansible/ansible.cfg or ~/ansible.cfg or by exporting ANSIBLE\_HOST\_KEY\_CHECKING=False - Paramiko now records host keys it was in contact with host key checking is on. It is somewhat sluggish when doing this, so switch to the 'ssh' transport if this concerns you. 1.2 "Right Now" -- June 10, 2013 -------------------------------- Core Features: - capability to set 'all\_errors\_fatal: True' in a playbook to force any error to stop execution versus a whole group or serial block needing to fail usable, without breaking the ability to override in ansible - ability to use variables from {{ }} syntax in mainline playbooks, new 'when' conditional, as detailed in documentation. Can disable old style replacements in ansible.cfg if so desired, but are still active by default. - can set ansible\_ssh\_private\_key\_file as an inventory variable (similar to ansible\_ssh\_host, etc) - 'when' statement can be affixed to task includes to auto-affix the conditional to each task therein - cosmetic: "\*\*\*\*\*" banners in ansible-playbook output are now constant width - --limit can now be given a filename (--limit @filename) to constrain a run to a host list on disk - failed playbook runs will create a retry file in /var/tmp/ansible usable with --limit - roles allow easy arrangement of reusable tasks/handlers/files/templates - pre\_tasks and post\_tasks allow for separating tasks into blocks where handlers will fire around them automatically - "meta: flush\_handler" task capability added for when you really need to force handlers to run - new --start-at-task option to ansible playbook allows starting at a specific task name in a long playbook - added a log file for ansible/ansible-playbook, set 'log\_path' in the configuration file or ANSIBLE\_LOG\_PATH in environment - debug mode always outputs debug in playbooks, without needing to specify -v - external inventory script added for Spacewalk / Red Hat Satellite servers - It is now possible to feed JSON structures to --extra-vars. Pass in a JSON dictionary/hash to feed in complex data. - group\_vars/ and host\_vars/ directories can now be kept alongside the playbook as well as inventory (or both!) - more filters: ability to say {{ foo\|success }} and {{ foo\|failed }} and when: foo\|success and when: foo\|failed - more filters: {{ path\|basename }} and {{ path\|dirname }} - lookup plugins now use the basedir of the file they have included from, avoiding needs of ../../../ in places and increasing the ease at which things can be reorganized. 
Modules added:

- cloud
   - rax *-- module for creating instances in the rackspace cloud (uses pyrax)*
- packages
   - npm *-- node.js package management*
   - pkgng *-- next-gen package manager for FreeBSD*
   - redhat\_subscription *-- manage Red Hat subscription usage*
   - rhn\_register *-- basic RHN registration*
   - zypper *(SuSE)*
- database
   - postgresql\_priv *-- manages postgresql privileges*
- networking
   - bigip\_pool *-- load balancing with F5s*
   - ec2\_elb *-- add and remove machines from ec2 elastic load balancers*
- notification
   - hipchat *-- send notification events to hipchat*
   - flowdock *-- send messages to flowdock during playbook runs*
   - campfire *-- send messages to campfire during playbook runs*
   - mqtt *-- send messages to the Mosquitto message bus*
   - irc *-- send messages to IRC channels*
   - filesystem *-- a wrapper around mkfs*
   - jabber *-- send jabber chat messages*
   - osx\_say *-- make OS X say things out loud*
- openstack
   - glance\_image
   - nova\_compute
   - nova\_keypair
   - keystone\_user
   - quantum\_floating\_ip
   - quantum\_floating\_ip\_associate
   - quantum\_network
   - quantum\_router
   - quantum\_router\_gateway
   - quantum\_router\_interface
   - quantum\_subnet
- monitoring
   - airbrake\_deployment *-- notify airbrake of new deployments*
   - monit
   - newrelic\_deployment *-- notifies newrelic of new deployments*
   - pagerduty
   - pingdom
- utility
   - set\_fact *-- sets a variable, which can be the result of a template evaluation*

Modules removed:

- vagrant -- can't be compatible with both versions at once, just run things through the vagrant provisioner in vagrant core

Bugfixes and Misc Changes:

- service module happier if only enabled=yes\|no specified and no state
- mysql\_db: use --password= instead of -p in dump/import so it doesn't go interactive if no pass set
- when using -c ssh and the ansible user is the current user, don't pass a -o to allow SSH config to be used
- overwrite parameter added to the s3 module
- private\_ip parameter added to the ec2 module
- $FILE and $PIPE now tolerate unicode
- various plugin loading operations have been made more efficient
- hostname now uses platform.node versus socket.gethostname to be more consistent with Unix 'hostname'
- fix for SELinux operations on Unicode path names
- inventory directory locations now ignore files with .ini extensions, making hybrid inventory easier
- copy module in check-mode now reports back correct changed status when used with force=no
- added avail. zone to ec2 module
- fixes to the hash variable merging logic if so enabled in the main settings file (default is to replace, not merge hashes)
- group\_vars and host\_vars files can now end in a .yaml or .yml extension (previously required no extension, still favored)
- ec2vol module improvements
- if the user module is told to generate the ssh key, the key generated is now returned in the results
- misc fixes to the Riak module
- make template module slightly more efficient
- base64encode / decode filters are now available to templates
- libvirt module can now work with multiple different libvirt connection URIs
- fix for postgresql password escaping
- unicode fix for shlex.split in some cases
- apt module upgrade logic improved
- URI module now can follow redirects
- yum module can now install off http URLs
- sudo password now defaults to ssh password if you ask for both and just hit enter on the second prompt
- validate feature on copy and template module, for example, running visudo prior to copying the file over (see the sketch after this list)
- network facts upgraded to return advanced configs (bonding, etc)
- region support added to ec2 module
- riak module gets a wait for ring option
- improved check mode support in the file module
- exception handling added to handle scenario when attempt to log to systemd journal fails
- fix for upstart handling when toggling the enablement and running bits at the same time
- when registering a task with a conditional attached, and the task is skipped by the conditional, the variable is still registered for the host, with the attribute skipped: True.
- delegate\_to tasks can look up ansible\_ssh\_private\_key\_file variable from inventory correctly now
- s3 module takes a 'dest' parameter to change the destination for uploads
- apt module gets a cache\_valid\_time option to avoid redundant cache updates
- ec2 module better understands security groups
- fix for postgresql codec usage
- setup module now tolerant of OpenVZ interfaces
- check mode reporting improved for files and directories
- doc system now reports on module requirements
- group\_by module can now also make use of globally scoped variables
- localhost and 127.0.0.1 are now fuzzy matched in inventory (are now more or less interchangeable)
- AIX improvements/fixes for users, groups, facts
- lineinfile now does atomic file replacements
- fix to not pass PasswordAuthentication=no in the config file unnecessarily for SSH connection type
- fix for authorized\_key on Debian Squeeze
- fixes for apt\_repository module reporting changed incorrectly on certain repository types
- allow the virtualenv argument to the pip module to be a pathname
- service pattern argument now correctly read for BSD services
- fetch location can now be controlled more directly via the 'flat' parameter
- added basename and dirname as Jinja2 filters available to all templates
- pip works better when sudoing from unprivileged users
- fix for user creation with groups specification reporting 'changed' incorrectly in some cases
- fix for some unicode encoding errors in outputting some data in verbose mode
- improved FreeBSD, NetBSD and Solaris facts
- debug module always outputs data without having to specify -v
- fix for sysctl module creating new keys (must specify checks=none)
- NetBSD and OpenBSD support for the user and groups modules
- Add encrypted password support to password lookup
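A hedged sketch of the validate feature described above (the file names are illustrative; %s is replaced with the path of the staged temporary file before it is moved into place):

.. code:: yaml

    # refuse to install a sudoers fragment that visudo rejects
    - copy: src=deployers.sudoers dest=/etc/sudoers.d/deployers validate='visudo -cf %s'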
1.1 "Mean Street" -- 4/2/2013
-----------------------------

Core Features:

- added --check option for "dry run" mode
- added --diff option to show how templates or copied files change, or might change
- --list-tasks for the playbook will list the tasks without running them
- able to set the environment by setting "environment:" as a dictionary on any task (go proxy support!)
- added ansible\_ssh\_user and ansible\_ssh\_pass for per-host/group username and password
- jinja2 extensions can now be loaded from the config file
- support for complex arguments to modules (within reason)
- can specify ansible\_connection=X to define the connection type in inventory variables
- a new chroot connection type
- module common code now has basic type checking (and casting) capability
- module common now supports a 'no\_log' attribute to mark a field as not to be syslogged
- inventory can now point to a directory containing multiple scripts/hosts files; if using this, put group\_vars/host\_vars directories inside this directory
- added configurable crypt scheme for 'vars\_prompt'
- password generating lookup plugin -- $PASSWORD(path/to/save/data/in)
- added --step option to ansible-playbook, works just like Linux interactive startup!

Modules Added:

- bzr *(bazaar version control)*
- cloudformation
- django-manage
- gem *(ruby gems)*
- homebrew
- lvg *(logical volume groups)*
- lvol *(LVM logical volumes)*
- macports
- mongodb\_user
- netscaler
- okg
- openbsd\_pkg
- rabbitmq\_parameter
- rabbitmq\_plugin
- rabbitmq\_user
- rabbitmq\_vhost
- rhn\_channel
- s3 *-- allows putting file contents in buckets for sharing over s3*
- uri module *-- can get/put/post/etc*
- vagrant *-- launching VMs with vagrant, this is different from existing vagrant plugin*
- zfs

Bugfixes and Misc Changes:

- stderr shown when commands fail to parse
- uses yaml.safe\_dump in filter plugins
- authentication Q&A no longer happens before --syntax-check, but after
- ability to get hostvars data for nodes not in the setup cache yet
- SSH timeout now correctly passed to native SSH connection plugin
- raise an error when multiple when\_ statements are provided
- --list-hosts applies host limit selections better
- (internals) template engine specifications to use template\_ds everywhere
- better error message when your host file can not be found
- end of line comments now work in the inventory file
- directory destinations now work better with remote md5 code
- lookup plugin macros like $FILE and $ENV now work without returning arrays in variable definitions/playbooks
- uses yaml.safe\_load everywhere
- able to add EXAMPLES to documentation via EXAMPLES docstring, rather than just in main documentation YAML
- can set ANSIBLE\_COW\_SELECTION to pick other cowsay types (including random)
- to\_nice\_yaml and to\_nice\_json available as Jinja2 filters that indent and sort
- cowsay able to run out of macports (very important!)
- improved logging for fireball mode
- nicer error message when talking to an older system that needs a JSON module installed
- 'magic' variable 'inventory\_dir' now gives path to inventory file
- 'magic' variable 'vars' works like 'hostvars' but gives global scope variables, useful for debugging in templates mostly
- conditionals can be used on plugins like add\_host
- developers: all callbacks now have access to a ".runner" and ".playbook", ".play", and ".task" object (use getattr, they may not always be set!)

Facts:

- block device facts for the setup module
- facts for AIX
- fact detection for OS type on Amazon Linux
- device fact gathering stability improvements
- ansible\_os\_family fact added
- user\_id (remote user name)
- a whole series of current time information under the 'datetime' hash
- more OS X facts
- support for detecting Alpine Linux
- added facts for OpenBSD

Module Changes/Fixes:

- ansible module common code (and ONLY that) which is mixed in with modules, is now BSD licensed. App remains GPLv3.
- service code works better on platforms that mix upstart, systemd, and system-v
- service enablement idempotence fixes for systemd and upstart
- service status 4 is also 'not running'
- supervisorctl restart fix
- increased error handling for ec2 module
- can recursively set permissions on directories
- ec2: change to the way AMI tags are handled
- cron module can now also manipulate cron.d files
- virtualenv module can now inherit system site packages (or not)
- lineinfile module now has an insertbefore option
- NetBSD service module support
- fixes to sysctl module where item has multiple values
- AIX support for the user and group modules
- able to specify a different hg repo to pull from than the original set
- add\_host module can set ports and other inventory variables
- add\_host module can add modules to multiple groups (groups=a,b,c), groups now alias for groupname
- subnet ID can be set on EC2 module
- MySQL module password handling improvements
- added new virtualenv flags to pip and easy\_install modules
- various improvements to lineinfile module, now accepts common arguments from file
- force= now replaces thirsty where used before, thirsty remains an alias
- setup module can take a 'filter=' parameter to just return a few facts (not used by playbooks)
- cron module works even if no crontab is present (for cron.d)
- security group ID settable on EC2 module
- misc fixes to sysctl module
- fix to apt module so packages not in cache are still removable
- charset fix to mail module
- postgresql db module now does not try to create the 'PUBLIC' user
- SVN module now works correctly with self signed certs
- apt module now has an upgrade parameter (values=yes, no, or 'dist'); see the sketch at the end of this list
- nagios module gets new silence/unsilence commands
- ability to disable proxy usage in get\_url (use\_proxy=no)
- more OS X facts
- added a 'fail\_on\_missing' (default no) option to fetch
- added timeout to the uri module (default 30 seconds, adjustable)
- ec2 now has a 'wait' parameter to wait for the instance to be active, eliminates need for separate wait\_for call
- allow regex backreferences in lineinfile
- id attribute on ec2 module can be used to set idempotent-do-not-recreate launches
- icinga support for nagios module
- fix default logins when no my.cnf for MySQL module
- option to create users with non-unique UIDs (user module)
- macports module can enable/disable packages
- quotes in my.cnf are stripped by the MySQL modules
- Solaris Service management added
- service module will attempt to auto-add unmanaged chkconfig services when needed
- service module supports systemd service unit files
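A minimal hedged sketch of the new upgrade parameter:

.. code:: yaml

    # refresh the package cache, then perform a dist-upgrade
    - apt: update_cache=yes upgrade=dist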
Plugins:

- added 'with\_random\_choice' filter plugin
- fixed ~ expansion for fileglob
- with\_nested allows for nested loops (see examples in examples/playbooks)

1.0 "Eruption" -- Feb 1 2013
----------------------------

New modules:

- apt\_key
- ec2\_facts
- hg *(now in core)*
- pacman *(Arch linux)*
- pkgin *(Joyent SmartOS)*
- sysctl

New config settings:

- sudo\_exe parameter can be set in config to use sudo alternatives
- sudo\_flags parameter can alter the flags used with sudo

New playbook/language features:

- added when\_failed and when\_changed
- task includes can now be of infinite depth
- when\_set and when\_unset can take more than one var (when\_set: $a and $b and $c)
- added the with\_sequence lookup plugin
- can override "connection:" on an individual task
- parameterized playbook includes can now define complex variables (not just all on one line)
- making inventory variables available for use in vars\_files paths
- messages when skipping plays are now more clear
- --extra-vars now has maximum precedence (as intended)

Module fixes and new flags:

- ability to use raw module without python on remote system
- fix for service status checking on Ubuntu
- service module now responds to additional exit code for SERVICE\_UNAVAILABLE
- fix for raw module with '-c local'
- various fixes to git module
- ec2 module now reports the public DNS name
- can pass executable= to the raw module to specify alternative shells
- fix for postgres module when user contains a "-"
- added additional template variables -- $template\_fullpath and $template\_run\_date
- raise errors on invalid arguments used with a task include statement
- shell/command module takes an executable= parameter to specify a different shell than /bin/sh
- added return code and error output to the raw module
- added support for @reboot to the cron module
- misc fixes to the pip module
- nagios module can schedule downtime for all services on the host
- various subversion module improvements
- various mail module improvements
- SELinux fix for files created by authorized\_key module
- "template override" ??
- get\_url module can now send user/password authorization
- ec2 module can now deploy multiple simultaneous instances
- fix for apt\_key modules stalling in some situations
- fix to enable Jinja2 {% include %} to work again in template
- ec2 module is now powered by Boto
- setup module can now detect if package manager is using pacman
- fix for yum module with enablerepo in use on EL 6

Core fixes and new behaviors:

- various fixes for variable resolution in playbooks
- fixes for handling of "~" in some paths
- various fixes to DWIM'ing of relative paths
- /bin/ansible now takes a --list-hosts just like ansible-playbook did
- various patterns can now take a regex vs a glob if they start with "~" (need docs on which!); see the sketch after this list
   - also /usr/bin/ansible
- allow intersecting host patterns by using "&" ("webservers:!debian:&datacenter1")
- handle tilde shell character for --private-key
- hash merging policy is now selectable in the config file, can choose to override or merge
- environment variables now available for setting all plugin paths (ANSIBLE\_CALLBACK\_PLUGINS, etc)
- added packaging file for macports (not upstreamed yet)
- hacking/test-module script now uses /usr/bin/env properly
- fixed error formatting for certain classes of playbook syntax errors
- fix for processing returns with large volumes of output
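A hedged sketch of a regex host pattern (the pattern itself is illustrative); a leading "~" switches matching from glob to regex:

.. code:: yaml

    - hosts: ~(web|db)\d+\.example\.com
      tasks:
        - command: /bin/uptime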
Inventory files/scripts:

- hostname patterns in the inventory file can now use alphabetic ranges
- whitespace is now allowed around group variables in the inventory file
- inventory scripts can now define groups of groups and group vars (need example for docs?)

0.9 "Dreams" -- Nov 30 2012
---------------------------

Highlighted core changes:

- various performance tweaks, ansible executes dramatically less SSH ops per unit of work
- close paramiko SFTP connections less often on copy/template operations (speed increase)
- change the way we use multiprocessing (speed/RAM usage improvements)
- able to set default for asking password & sudo password in config file
- ansible now installs nicely if running inside a virtualenv
- flag to allow SSH connection to move files by scp vs sftp (in config file)
- additional RPM subpackages for easily installing fireball mode deps (server and node)
- group\_vars/host\_vars now available to ansible, not just playbooks
- native ssh connection type (-c ssh) now supports passwords as well as keys
- ansible-doc program to show details

Other core changes:

- fix for template calls when last character is '$'
- if ansible\_python\_interpreter is set on a delegated host, it now works as intended
- --limit can now take "," as separator as well as ";" or ":"
- msg is now displayed with newlines when a task fails
- if any with\_ plugin has no results in a list (empty list for with\_items, etc), the task is now skipped
- various output formatting fixes/improvements
- fix for Xen dom0/domU detection in default facts
- 'ansible\_domain' fact now available (ex value: example.com)
- configured remote temp file location is now always used even for root
- 'register'-ed variables are not recorded for skipped hosts (for example, using only\_if/when)
- duplicate host records for the same host can no longer result when a host is listed in multiple groups
- ansible-pull now passes --limit to prevent running on multiple hosts when used with generic playbooks
- remote md5sum check fixes for Solaris 10
- ability to configure syslog facility used by remote module calls
- in templating, stray '$' characters are now handled more correctly

Playbook changes:

- relative paths now work for 'first\_available\_file'
- various templating engine fixes
- 'when' is an easier form of only if
- --list-hosts on the playbook command now supports multiple playbooks on the same command line
- playbook includes can now be parameterized

Module additions:

- (add\_host) new module for adding a temporary host record (used for creating new guests)
- (group\_by) module allows partitioning hosts based on group data
- (ec2) new module for creating ec2 hosts
- (script) added 'script' module for pushing and running self-deleting remote scripts
- (svr4pkg) solaris svr4pkg module

Module changes:

- (authorized key) module uses temp file now to prevent failure on full disk
- (fetch) now uses the 'slurp' internal code to work as you would expect under sudo'ed accounts
- (fetch) internal usage of md5 sums fixed for BSD
- (get\_url) thirsty is no longer required for directory destinations
- (git) various git module improvements/tweaks
- (group) now subclassed for various platforms, includes SunOS support
- (lineinfile) create= option on lineinfile can create the file when it does not exist
- (mysql\_db) module takes new grant options
- (postgresql\_db) module now takes role\_attr\_flags
- (service) further upgrades to service module service status reporting
- (service) tweaks to get service module to play nice with BSD style service systems (rc.conf)
- (service) possible to pass additional arguments to services
- (shell) and command module now take an 'executable=' flag for specifying an alternate shell than /bin/sh
- (user) ability to create SSH keys for users when using user module to create users
- (user) atomic replacement of files preserves permissions of original file
- (user) module can create SSH keys
- (user) module now does Solaris and BSD
- (yum) module takes enablerepo= and disablerepo=
- (yum) misc yum module fixing for various corner cases

Plugin changes:

- EC2 inventory script now produces nicer failure message if AWS is down (or similar)
- plugin loading code now more streamlined
- lookup plugins for DNS text records, environment variables, and redis
- added a template lookup plugin $TEMPLATE('filename.j2')
- various tweaks to the EC2 inventory plugin
- jinja2 filters are now pluggable so it's easy to write your own (to\_json/etc, are now impl. as such)

0.8 "Cathedral" -- Oct 19, 2012
-------------------------------

Highlighted Core Changes:

- fireball mode -- ansible can bootstrap an ephemeral 0mq (zeromq) daemon that runs as a given user and expires after X period of time. It is very fast.
- playbooks with errors now return 2 on failure. 1 indicates a more fatal syntax error. Similar for /usr/bin/ansible
- server side action code (template, etc) is now fully pluggable
- ability to write lookup plugins, like the code powering "with\_fileglob" (see below)

Other Core Changes:

- ansible config file can also go in 'ansible.cfg' in cwd in addition to ~/.ansible.cfg and /etc/ansible/ansible.cfg
- fix for inventory hosts at API level when hosts spec is a list and not a colon delimited string
- ansible-pull example now sets up logrotate for the ansible-pull cron job log
- negative host matching (!hosts) fixed for external inventory script usage
- internals: os.executable check replaced with utils function so it plays nice on AIX
- Debian packaging now includes ansible-pull manpage
- magic variable 'ansible\_ssh\_host' can override the hostname (great for usage with tunnels)
- date command usage in build scripts fixed for OS X
- don't use SSH agent with paramiko if a password is specified
- make output be cleaner on multi-line command/shell errors
- /usr/bin/ansible now prints things when tasks are skipped, like when creates= is used with -m command and /usr/bin/ansible
- when trying to async a module that is not a 'normal' asyncable module, ansible will now let you know
- ability to access inventory variables via 'hostvars' for hosts not yet included in any play, using on demand lookups
- merged ansible-plugins, ansible-resources, and ansible-docs into the main project
- you can set ANSIBLE\_NOCOWS=1 if you want to disable cowsay if it is installed. Though no one should ever want to do this! Cows are great!
- you can set ANSIBLE\_FORCE\_COLOR=1 to force color mode even when running without a TTY
- fatal errors are now properly colored red.
- skipped messages are now cyan, to differentiate them from unchanged messages.
- extensive documentation upgrades
- delegate\_action to localhost (aka local\_action) will always use the local connection type

Highlighted playbook changes:

- is\_set is available for use inside of an only\_if expression: is\_set('ansible\_eth0'). We intend to further upgrade this with a 'when' keyword providing better options to 'only\_if' in the next release. Also is\_unset('ansible\_eth0')
- playbooks can import playbooks in other directories and then be able to import tasks relative to them
- FILE($path) now allows access of contents of file in a path, very good for use with SSH keys
- similarly PIPE($command) will run a local command and return the results of executing this command
- if all hosts in a play fail, stop the playbook, rather than letting the console log spool on by
- only\_if using register variables that are booleans now works in a boolean way like you'd expect
- task includes now work with with\_items (such as: include: path/to/wordpress.yml user=$item)
- when using a $list variable with $var or ${var} syntax it will automatically join with commas
- setup is not run more than once when we know it has already been run in a play that included another play, etc
- can set/override sudo and sudo\_user on individual tasks in a play, defaults to what is set in the play if not present
- ability to use with\_fileglob to iterate over local file patterns (see the sketch at the end of this section)
- templates now use Jinja2's 'trim\_blocks=True' to avoid stray newlines, small changes to templates may be required in rare cases.

Other playbook changes:

- to\_yaml and from\_yaml are available as Jinja2 filters
- $group and $group\_names are now accessible in with\_items
- where 'stdout' is provided a new 'stdout\_lines' variable (type == list) is now generated and usable with with\_items
- when local\_action is used the transport is automatically overridden to the local type
- output on failed playbook commands is now nicely split for stderr/stdout and syntax errors
- if local\_action is not used and delegate\_to was 127.0.0.1 or localhost, use local connection regardless
- when running a playbook, and the statement has changed, prints 'changed:' now versus 'ok:' so it is obvious without colored mode
- variables now usable within vars\_prompt (just not host/group vars)
- setup facts are now retained across plays (dictionary just gets updated as needed)
- --sudo-user now works with --extra-vars
- fix for multi\_line strings with only\_if

New Modules:

- ini\_file module for manipulating INI files
- new LSB facts (release, distro, etc)
- pause module -- (pause seconds=10) (pause minutes=1) (pause prompt=foo) -- it's an action plugin
- a module for adding entries to the main crontab (though you may still wish to just drop template files into cron.d)
- debug module can be used for outputting messages without using 'shell echo'
- a fail module is now available for causing errors, you might want to use it with only\_if to fail in certain conditions

Other module Changes, Upgrades, and Fixes:

- removes= exists on command just like creates=
- postgresql modules now take an optional port= parameter
- /proc/cmdline info is now available in Linux facts
- public host key detection for OS X
- lineinfile module now uses 'search' not exact 'match' in regexes, making it much more intuitive and not needing regex syntax most of the time
- added force=yes\|no (default no) option for file module, which allows transition between files to directories and so on
- additional facts for SunOS virtualization
- copy module is now atomic when used across volumes
- get\_url module now returns 'dest' with the location of the file saved
- fix for yum module when using local RPMs vs downloading
- cleaner error messages with copy if destination directory does not exist
- setup module now still works if PATH is not set
- service module status now correct for services with 'subsys locked' status
- misc fixes/upgrades to the wait\_for module
- git module now expands any "~" in provided destination paths
- ignore stop error code failure for service module with state=restarted, always try to start
- inline documentation for modules allows documentation source to be built without pull requests to the ansible-docs project, among other things
- variable '$ansible\_managed' is now great to include at the top of your templates and includes useful information and a warning that it will be replaced
- "~" now expanded in command module when using creates/removes
- mysql module can do dumps and imports
- selinux policy is only required if setting to not disabled
- various fixes for yum module when working with packages not in any present repo
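A hedged sketch of with\_fileglob (the paths are illustrative; $item is the per-file variable in this era's syntax):

.. code:: yaml

    # copy every matching local file to the remote directory
    - copy: src=$item dest=/etc/fooapp/ owner=root mode=600
      with_fileglob:
        - /srv/fooapp/conf.d/*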
0.7 "Panama" -- Sept 6 2012
---------------------------

Module changes:

- login\_unix\_socket option for mysql user and database modules (see PR #781 for doc notes)
- new modules -- pip, easy\_install, apt\_repository, supervisorctl
- error handling for setup module when SELinux is in a weird state
- misc yum module fixes
- better changed=True/False detection in user module on older Linux distros
- nicer errors from modules when arguments are not key=value
- backup option on copy (backup=yes), as well as template, assemble, and lineinfile
- file module will not recurse on directory properties
- yum module now workable without having repoquery installed, but doesn't support comparisons or list= if so
- setup module now detects interfaces with aliases
- better handling of VM guest type detection in setup module
- new module boilerplate code to check for mutually required arguments, arguments required together, exclusive args
- add pattern= as a parameter to the service module (for init scripts that don't do status, or do poor status)
- various fixes to mysql & postgresql modules
- added a thirsty= option (boolean, default no) to the get\_url module to decide to download the file every time or not
- added a wait\_for module to poll for ports being open
- added a nagios module for controlling outage windows and alert statuses
- added a seboolean module for getsebool/setsebool type operations
- added a selinux module for controlling overall SELinux policy
- added a subversion module
- added lineinfile for adding and removing lines from basic files
- added facts for ARM-based CPUs
- support for systemd in the service module
- git module force reset behavior is now controllable
- file module can now operate on special files (block devices, etc)

Core changes:

- ansible --version will now give branch/SHA information if running from git
- better sudo permissions when encountering different umasks
- when using paramiko and SFTP is not accessible, do not traceback, but return a nice human readable msg
- use -vvv for extreme debug levels
- -v gives more playbook output as before
- -vv shows module arguments to all module calls (and maybe some other things later)
- do not pass "--" to sudo to work on older EL5
- make remote\_md5 internal function work with non-bash shells
- allow user to be passed in via --extra-vars (regression)
- add --limit option, which can be used to further confine the pattern given in ansible-playbooks
- adds ranged patterns like dbservers[0-49] for usage with patterns or --limit
- -u and user: defaults to current user, rather than root, override as before
- /etc/ansible/ansible.cfg and ~/ansible.cfg now available to set default values and other things
- (developers) ANSIBLE\_KEEP\_REMOTE\_FILES=1 can be used in debugging (environment variable)
- (developers) connection types are now plugins
- (developers) callbacks can now be extended via plugins
- added FreeBSD ports packaging scripts
- check for terminal properties prior to engaging color modes
- explicitly disable password auth with -c ssh, as it is not used anyway

Playbooks:

- YAML syntax errors detected and show where the problem is
- if you ctrl+c a playbook it will not traceback (usually)
- vars\_prompt now has encryption options (see examples/playbooks/prompts.yml)
- allow variables in parameterized task include parameters (regression)
- add ability to store the result of any command in a register (see examples/playbooks/register\_logic.yml)
- --list-hosts to show what hosts are included in each play of a playbook
- fix a variable ordering issue that could affect vars\_files with selective file source lists
- adds 'delegate\_to' for a task, which can be used to signal outage windows and load balancers on behalf of hosts
- adds 'serial' to playbook, allowing you to specify how many hosts can be processing a playbook at one time (default 0=all)
- adds 'local\_action: ' as an alias to 'delegate\_to: 127.0.0.1'

0.6 "Cabo" -- August 6, 2012
----------------------------

playbooks:

- support to tag tasks and includes and use --tags in playbook CLI (see the sketch below)
- playbooks can now include other playbooks (example/playbooks/nested\_playbooks.yml)
- vars\_files now usable with with\_items, provided file paths don't contain host specific facts
- error reporting if with\_items value is unbound
- with\_items no longer creates lots of tasks, creates one task that makes multiple calls
- can use host\_specific facts inside with\_items (see above)
- at the top level of a playbook, set 'gather\_facts: no' to skip fact gathering
- first\_available\_file and with\_items used together will now raise an error
- to catch typos, like 'var' for 'vars', playbooks and tasks now yell on invalid parameters
- automatically load (directory\_of\_inventory\_file)/group\_vars/groupname and /host\_vars/hostname in vars\_files
- playbook is now colorized, set ANSIBLE\_NOCOLOR=1 if you do not like this, does not colorize if not a TTY
- hostvars now preserved between plays (regression in 0.5 from 0.4), useful for sharing vars in multinode configs
- ignore\_errors: yes on a task can be used to allow a task to fail and not stop the play
- with\_items with the apt/yum module will install/remove/update everything in a single command

inventory:

- groups variable available as a hash to return the hosts in each group name
- in YAML inventory, hosts can list their groups in inverted order now also (see tests/yaml\_hosts)
- YAML inventory is deprecated and will be removed in 0.7
- ec2 inventory script
- support ranges of hosts in the host file, like www[001-100].example.com (supports leading zeros and also not)
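A hedged sketch of tagged tasks (the package names are illustrative); running ``ansible-playbook site.yml --tags packages`` would run only the tagged task:

.. code:: yaml

    - hosts: webservers
      tasks:
        # with_items issues a single yum transaction for all items
        - yum: pkg=$item state=installed
          with_items:
            - httpd
            - mod_ssl
          tags:
            - packages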
modules:

- fetch module now does not fail a system when requesting file paths (ex: logs) that don't exist
- apt module now takes an optional install-recommends=yes\|no (default yes)
- fixes to the return codes of the copy module
- copy module takes a remote md5sum to avoid large file transfer
- various user and group module fixes (error handling, etc)
- apt module now takes an optional force parameter
- slightly better psychic service status handling for the service module
- fetch module fixes for SSH connection type
- modules now consistently all take yes/no for boolean parameters (and DWIM on true/false/1/0/y/n/etc)
- setup module no longer saves to disk, template module now only used in playbooks
- setup module no longer needs to run twice per playbook
- apt module now passes DEBIAN\_FRONTEND=noninteractive
- mount module (manages active mounts + fstab)
- setup module fixes if no ipv6 support
- internals: template in common module boilerplate, also causes less SSH operations when used
- git module fixes
- setup module overhaul, more modular
- minor caching logic added to inventory to reduce hammering of inventory scripts.
- MySQL and PostgreSQL modules for user and db management
- vars\_prompt now supports private password entry (see examples/playbooks/prompts.yml)
- yum module modified to be more tolerant of plugins spewing random console messages (ex: RHN)

internals:

- when sudoing to root, still use /etc/ansible/setup as the metadata path, as if root
- paramiko is now only imported if needed when running from source checkout
- cowsay support on Ubuntu
- various ssh connection fixes for old Ubuntu clients
- ./hacking/test-module now supports options like ansible takes and has a debugger mode
- sudoing to a user other than root now works more seamlessly (uses /tmp, avoids umask issues)

0.5 "Amsterdam" -- July 04, 2012
--------------------------------

- Service module gets more accurate service states when running with upstart
- Jinja2 usage in playbooks (not templates), reinstated, supports %include directive
- support for --connection ssh (supports Kerberos, bastion hosts, etc), requires ControlMaster
- misc tracebacks replaced with error messages
- various API/internals refactoring
- vars can be built from other variables
- support for exclusion of hosts/groups with "!groupname"
- various changes to support md5 tool differences for FreeBSD nodes & OS X clients
- "unparseable" command output shows in command output for easier debugging
- mktemp is no longer required on remotes (not available on BSD)
- support for older versions of python-apt in the apt module
- a new "assemble" module, for constructing files from pieces of files (inspired by Puppet "fragments" idiom)
- ability to override most default values with ANSIBLE\_FOO environment variables
- --module-path parameter can support multiple directories separated with the OS path separator
- with\_items can take a variable of type list (see the sketch after this list)
- ansible\_python\_interpreter variable available for systems with more than one Python
- BIOS and VMware "fact" upgrades
- cowsay is used by ansible-playbook if installed to improve output legibility (try installing it)
- authorized\_key module
- SELinux facts now sourced from the python selinux library
- removed module debug option -D
- added --verbose, which shows output from successful playbook operations
- print the output of the raw command inside /usr/bin/ansible as with command/shell
- basic setup module support for Solaris
- ./library relative to the playbook is always in path so modules can be included in tarballs with playbooks
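A hedged sketch of with\_items taking a list variable (the names are illustrative; $-style substitution per that era's syntax):

.. code:: yaml

    vars:
      app_users:
        - alice
        - bob
    tasks:
      # loops over the list defined in vars
      - user: name=$item state=present
        with_items: $app_users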
0.4 "Unchained" -- May 23, 2012
-------------------------------

Internals/Core

- internal inventory API now more object oriented, parsers decoupled
- async handling improvements
- misc fixes for running ansible on OS X (overlord only)
- sudo improvements, now works much more smoothly
- sudo to a particular user with -U/--sudo-user, or using 'sudo\_user: foo' in a playbook
- --private-key CLI option to work with pem files

Inventory

- can use -i host1,host2,host3:port to specify hosts not in inventory (replaces --override-hosts)
- ansible INI style format can do groups of groups [groupname:children] and group vars [groupname:vars]
- groups and users module takes an optional system=yes\|no on creation (default no)
- list of hosts in playbooks can be expressed as a YAML list in addition to ; delimited

Playbooks

- variables can be replaced like ${foo.nested\_hash\_key.nested\_subkey[array\_index]}
- unicode now ok in templates (assumes utf8)
- able to pass host specifier or group name in to "hosts:" with --extra-vars
- ansible-pull script and example playbook (extreme scaling, remediation)
- inventory\_hostname variable available that contains the value of the host as ansible knows it
- variables in the 'all' section can be used to define other variables based on those values
- 'group\_names' is now a variable made available to templates
- first\_available\_file feature, see selective\_file\_sources.yml in examples/playbooks for info
- --extra-vars="a=2 b=3" etc, now available to inject parameters into playbooks from CLI

Incompatible Changes

- jinja2 is only usable in templates, not playbooks, use $foo instead
- --override-hosts removed, can use -i with comma notation (-i "ahost,bhost")
- modules can no longer include stderr output (paramiko limitation from sudo)

Module Changes

- tweaks to SELinux implementation for file module
- fixes for yum module corner cases on EL5
- file module now correctly returns the mode in octal
- fix for symlink handling in the file module
- service takes an enable=yes\|no which works with chkconfig or updates-rc.d as appropriate
- service module works better on Ubuntu
- git module now does resets and such to work more smoothly on updates
- modules all now log to syslog
- enabled=yes\|no on a service can be used to toggle chkconfig & updates-rc.d states
- git module supports branch=
- service fixes to better detect status using return codes of the service script
- custom facts provided by the setup module mean no dependency on Ruby, facter, or ohai
- service now has a state=reloaded
- raw module for bootstrapping and talking to routers w/o Python, etc

Misc Bugfixes

- fixes for variable parsing in only\_if lines
- misc fixes to key=value parsing
- variables with mixed case now legal
- fix to internals of hacking/test-module development script

0.3 "Baluchitherium" -- April 23, 2012
--------------------------------------

- Packaging for Debian, Gentoo, and Arch
- Improvements to the apt and yum modules
- A virt module
- SELinux support for the file module
- Ability to use facts from other systems in templates (aka exported resources like support)
- Built in Ansible facts so you don't need ohai, facter, or Ruby
- tempdir selections that work with noexec mounted /tmp
- templates happen locally, not remotely, so no dependency on python-jinja2 for remote computers
- advanced inventory format in YAML allows more control over variables per host and per group
- variables in playbooks can be structured/nested versus just a flat namespace
- manpage upgrades (docs)
- various bugfixes
- can specify a default --user for playbooks rather than specifying it in the playbook file
- able to specify ansible port in ansible host file (see docs)
- refactored Inventory API to make it easier to write scripts using Ansible
- looping capability for playbooks (with\_items)
- support for using sudo with a password
- module arguments can be unicode
- A local connection type, --connection=local, for use with cron or in kickstarts
- better module debugging with -D
- fetch module for pulling in files from remote hosts
- command task supports creates=foo for idempotent semantics, won't run if file foo already exists

0.0.2 and 0.0.1
---------------

- Initial stages of project

ansible-2.5.1/changelogs/CHANGELOG-v2.0.rst0000644000000000000000000005320613265756155017735 0ustar rootroot00000000000000
=======================================================
Ansible 2.0 "Over the Hills and Far Away" Release Notes
=======================================================

2.0.3 "Over the Hills and Far Away"
-----------------------------------

- Backport fix to uri module to return the body of an error response
- Backport fix to uri module to handle file:/// uris.
- Backport fix to uri module to fix traceback when handling certain server error types.

2.0.2 "Over the Hills and Far Away"
-----------------------------------

- Backport of the 2.1 feature to ensure per-item callbacks are sent as they occur, rather than all at once at the end of the task.
- Fixed bugs related to the iteration of tasks when certain combinations of roles, blocks, and includes were used, especially when handling errors in rescue/always portions of blocks.
- Fixed handling of redirects in our helper code, and ported the uri module to use this helper code. This removes the httplib dependency for this module while fixing some bugs related to redirects and SSL certs.
- Fixed some bugs related to the incorrect creation of extra temp directories for uploading files, which were not cleaned up properly.
- Improved error reporting in certain situations, to provide more information such as the playbook file/line.
- Fixed a bug related to the variable precedence of role parameters, especially when a role may be used both as a dependency of a role and directly by itself within the same play.
- Fixed some bugs in the 2.0 implementation of do/until.
- Fixed some bugs related to run\_once:
   - Ensure that all hosts are marked as failed if a task marked as run\_once fails.
   - Show a warning when using the free strategy when a run\_once task is encountered, as there is no way for the free strategy to guarantee the task is not run more than once.
- Fixed a bug where the assemble module was not honoring check mode in some situations.
- Fixed a bug related to delegate\_to, where we were incorrectly using variables from the inventory host rather than the delegated-to host.
- The 'package' meta-module now properly squashes items down to a single execution (as the apt/yum/other package modules do).
- Fixed a bug related to the ansible-galaxy CLI command dealing with paged results from the Galaxy server.
- Pipelining support is now available for the local and jail connection plugins, which is useful for users who do not wish to have temp files/directories created when running tasks with these connection types.
- Improvements in support for additional shell types.
- Improvements in the code which is used to calculate checksums for remote files.
- Some speed ups and bug fixes related to the variable merging code.
- Workaround bug in python subprocess on El Capitan that was making vault fail when attempting to encrypt a file
- Fix lxc\_container module having predictable temp file names, and setting overly lenient file permissions on a temporary file that was executed as a script. Addresses CVE-2016-3096
- Fix a bug in the uri module where setting headers via module params that start with ``HEADER_`` was causing a traceback (see the sketch after this list for the syntax concerned).
- Fix bug in the free strategy that was causing it to synchronize its workers after every task (making it a lot more like linear than it should have been).
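A hedged sketch of the ``HEADER_`` parameter style that the fix concerns (the URL and header are illustrative):

.. code:: yaml

    - uri:
        url: https://example.com/api/status
        HEADER_X-Request-Id: "12345"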
2.0.1 "Over the Hills and Far Away"
-----------------------------------

- Fixes a major compatibility break in the synchronize module shipped with 2.0.0.x. That version of synchronize ran sudo on the controller prior to running rsync. In 1.9.x and previous, sudo was run on the host that rsync connected to. 2.0.1 restores the 1.9.x behaviour.
- Additionally, several other problems with where synchronize chose to run when combined with delegate\_to were fixed. In particular, if a playbook targeted localhost and then delegated\_to a remote host, the prior behavior (in 1.9.x and 2.0.0.x) was to copy files between the src and destination directories on the delegated host. This has now been fixed to copy between localhost and the delegated host.
- Fix a regression where synchronize was unable to deal with unicode paths.
- Fix a regression where synchronize deals with inventory hosts that use localhost but with an alternate port.
- Fixes a regression where the retry files feature was not implemented.
- Fixes a regression where the any\_errors\_fatal option was implemented in 2.0 incorrectly, and also adds a feature where any\_errors\_fatal can be set at the block level.
- Fix tracebacks when playbooks or ansible itself were located in directories with unicode characters.
- Fix bug when sending unicode characters to an external pager for display.
- Fix a bug with squashing loops for special modules (mostly package managers). The optimization was squashing when the loop did not apply to the selection of packages. This has now been fixed.
- Temp files created when using vault are now "shredded" using the unix shred program, which overwrites the file with random data.
- Some fixes to cloudstack modules for case sensitivity
- Fix non-newstyle modules (non-python modules and old-style modules) to disable pipelining.
- Fix fetch module failing even if fail\_on\_missing is set to False
- Fix for cornercase when local connections, sudo, and raw were used together.
- Fix dnf module to remove dependent packages when state=absent is specified. This was a feature of the 1.9.x version that was left out by mistake when the module was rewritten for 2.0.
- Fix bugs with non-english locales in yum, git, and apt modules
- Fix a bug with the dnf module where state=latest could only upgrade, not install.
- Fix to make the implicit fact-gathering task correctly inherit settings from the play; this might cause an error if you set the environment on a play depending on 'ansible\_env', which was previously ignored.

2.0 "Over the Hills and Far Away" - Jan 12, 2016
------------------------------------------------

Major Changes:
~~~~~~~~~~~~~~

- Releases are now named after Led Zeppelin songs, 1.9 will be the last Van Halen named release.
- The new block/rescue/always directives allow for making task blocks with exception-like semantics - New strategy plugins (e.g. ``free``) allow control over the flow of task execution per play. The default (``linear``) will be the same as before. - Improved error handling, with more detailed parser messages. General exception handling and display has been revamped. - Task includes are now evaluated during execution, allowing more dynamic includes and options. Play includes are unchanged; both still use the ``include`` directive. - "with\_" loops can now be used with task includes since they are dynamic. - Callback, connection, cache and lookup plugin APIs have changed. Existing plugins might require modification to work with the new versions. - Callbacks are now shipped in the active directory and don't need to be copied, just whitelisted in ansible.cfg. - Many API changes. Those integrating directly with Ansible's API will encounter breaking changes, but the new API is much easier to use and test. - Settings are now more inheritable; what you set at play, block or role will be automatically inherited by the contained tasks. This allows new features to automatically be settable at all levels; previously we had to manually code this. - Vars are now settable at play, block, role and task level with the ``vars`` directive and scoped to the tasks contained. - Template code now retains types for bools and numbers instead of turning them into strings. If you need the old behaviour, quote the value and it will get passed around as a string. - Empty variables and variables set to null in yaml will no longer be converted to empty strings. They will retain the value of ``None``. To go back to the old behaviour, you can override the ``null_representation`` setting to an empty string in your config file or by setting the ``ANSIBLE_NULL_REPRESENTATION`` environment variable. - Added ``meta: refresh_inventory`` to force rereading the inventory in a play. This re-executes inventory scripts, but does not force them to ignore any cache they might use. - New delegate\_facts directive, a boolean that allows you to apply facts to the delegated host (true/yes) instead of the inventory\_hostname (no/false), which is the default and previous behaviour. - local connections now work with 'su' as a privilege escalation method - Ansible 2.0 has deprecated the "ssh" from ansible\_ssh\_user, ansible\_ssh\_host, and ansible\_ssh\_port to become ansible\_user, ansible\_host, and ansible\_port. - New ssh configuration variables (``ansible_ssh_common_args``, ``ansible_ssh_extra_args``) can be used to configure a per-group or per-host ssh ProxyCommand or set any other ssh options. ``ansible_ssh_extra_args`` is used to set options that are accepted only by ssh (not sftp or scp, which have their own analogous settings). - ansible-pull can now verify the code it runs when using git as a source repository, using git's code signing and verification features. - Backslashes used when specifying parameters in jinja2 expressions in YAML dicts sometimes needed to be escaped twice. This has been fixed so that escaping once works. Here's an example of how playbooks need to be modified:
.. code:: yaml

    # Syntax in 1.9.x
    - debug:
        msg: "{{ 'test1_junk 1\\\\3' | regex_replace('(.*)_junk (.*)', '\\\\1 \\\\2') }}"

    # Syntax in 2.0.x
    - debug:
        msg: "{{ 'test1_junk 1\\3' | regex_replace('(.*)_junk (.*)', '\\1 \\2') }}"

    # Output:
    "msg": "test1 1\\3"

- When a string with a trailing newline was specified in the playbook via yaml dict format, the trailing newline was stripped. When specified in key=value format the trailing newlines were kept. In v2, both methods of specifying the string will keep the trailing newlines. If you relied on the trailing newline being stripped, you can change your playbook like this:

.. code:: yaml

    # Syntax in 1.9.2
    vars:
      message: >
        Testing
        some things
    tasks:
    - debug:
        msg: "{{ message }}"

    # Syntax in 2.0.x
    vars:
      old_message: >
        Testing
        some things
      message: "{{ old_message[:-1] }}"
    - debug:
        msg: "{{ message }}"

    # Output
    "msg": "Testing some things"

- In 1.9.x, newlines in templates were converted to Unix EOL conventions. If someone wanted a templated file to end up with Windows or Mac EOL conventions, this could cause problems for them. In 2.x newlines now remain as specified in the template file. - When specifying complex args as a variable, the variable must use the full jinja2 variable syntax ('{{var\_name}}') - bare variable names there are no longer accepted. In fact, even specifying args with variables has been deprecated, and will not be allowed in future versions:

.. code:: yaml

    ---
    - hosts: localhost
      connection: local
      gather_facts: false
      vars:
        my_dirs:
          - { path: /tmp/3a, state: directory, mode: 0755 }
          - { path: /tmp/3b, state: directory, mode: 0700 }
      tasks:
        - file:
          args: "{{item}}"
          with_items: my_dirs

- The bigip\* networking modules have a new parameter, validate\_certs. When True (the default), and when run on new enough python versions, the module will validate the TLS certificates presented by any hosts it connects to. If the python version is too old to validate certificates or you used certificates that cannot be validated against available CAs, you will need to add validate\_certs=no to your playbook for those tasks. Plugins ~~~~~~~ - Rewritten dnf module that should be faster and less prone to encountering bugs in corner cases - WinRM connection plugin passes all vars named ``ansible_winrm_*`` to the underlying pywinrm client. This allows, for instance, ``ansible_winrm_server_cert_validation=ignore`` to be used with newer versions of pywinrm to disable certificate validation on Python 2.7.9+. - WinRM connection plugin put\_file is significantly faster and no longer has file size limitations.
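As a hedged illustration of the ``ansible_winrm_*`` passthrough described above (the group file name and credential values are hypothetical, not from this changelog), such variables can be set in inventory or group vars like any other connection variable:

.. code:: yaml

    # group_vars/windows.yml -- hypothetical group of Windows hosts
    ansible_user: Administrator
    ansible_connection: winrm
    # passed straight through to the pywinrm client by the connection plugin
    ansible_winrm_server_cert_validation: ignore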
Deprecated Modules (new ones in parens): ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - ec2\_ami\_search (ec2\_ami\_find) - quantum\_network (os\_network) - glance\_image - nova\_compute (os\_server) - quantum\_floating\_ip (os\_floating\_ip) - quantum\_router (os\_router) - quantum\_router\_gateway (os\_router) - quantum\_router\_interface (os\_router) New Modules: ^^^^^^^^^^^^ - amazon: ec2\_ami\_copy - amazon: ec2\_ami\_find - amazon: ec2\_elb\_facts - amazon: ec2\_eni - amazon: ec2\_eni\_facts - amazon: ec2\_remote\_facts - amazon: ec2\_vpc\_igw - amazon: ec2\_vpc\_net - amazon: ec2\_vpc\_net\_facts - amazon: ec2\_vpc\_route\_table - amazon: ec2\_vpc\_route\_table\_facts - amazon: ec2\_vpc\_subnet - amazon: ec2\_vpc\_subnet\_facts - amazon: ec2\_win\_password - amazon: ecs\_cluster - amazon: ecs\_task - amazon: ecs\_taskdefinition - amazon: elasticache\_subnet\_group\_facts - amazon: iam - amazon: iam\_cert - amazon: iam\_policy - amazon: route53\_facts - amazon: route53\_health\_check - amazon: route53\_zone - amazon: sts\_assume\_role - amazon: s3\_bucket - amazon: s3\_lifecycle - amazon: s3\_logging - amazon: sqs\_queue - amazon: sns\_topic - amazon: sts\_assume\_role - apk - bigip\_gtm\_wide\_ip - bundler - centurylink: clc\_aa\_policy - centurylink: clc\_alert\_policy - centurylink: clc\_blueprint\_package - centurylink: clc\_firewall\_policy - centurylink: clc\_group - centurylink: clc\_loadbalancer - centurylink: clc\_modify\_server - centurylink: clc\_publicip - centurylink: clc\_server - centurylink: clc\_server\_snapshot - circonus\_annotation - consul - consul\_acl - consul\_kv - consul\_session - cloudtrail - cloudstack: cs\_account - cloudstack: cs\_affinitygroup - cloudstack: cs\_domain - cloudstack: cs\_facts - cloudstack: cs\_firewall - cloudstack: cs\_iso - cloudstack: cs\_instance - cloudstack: cs\_instancegroup - cloudstack: cs\_ip\_address - cloudstack: cs\_loadbalancer\_rule - cloudstack: cs\_loadbalancer\_rule\_member - cloudstack: cs\_network - cloudstack: cs\_portforward - cloudstack: cs\_project - cloudstack: cs\_sshkeypair - cloudstack: cs\_securitygroup - cloudstack: cs\_securitygroup\_rule - cloudstack: cs\_staticnat - cloudstack: cs\_template - cloudstack: cs\_user - cloudstack: cs\_vmsnapshot - cronvar - datadog\_monitor - deploy\_helper - docker: docker\_login - dpkg\_selections - elasticsearch\_plugin - expect - find - google: gce\_tag - hall - ipify\_facts - iptables - libvirt: virt\_net - libvirt: virt\_pool - maven\_artifact - openstack: os\_auth - openstack: os\_client\_config - openstack: os\_image - openstack: os\_image\_facts - openstack: os\_floating\_ip - openstack: os\_ironic - openstack: os\_ironic\_node - openstack: os\_keypair - openstack: os\_network - openstack: os\_network\_facts - openstack: os\_nova\_flavor - openstack: os\_object - openstack: os\_port - openstack: os\_project - openstack: os\_router - openstack: os\_security\_group - openstack: os\_security\_group\_rule - openstack: os\_server - openstack: os\_server\_actions - openstack: os\_server\_facts - openstack: os\_server\_volume - openstack: os\_subnet - openstack: os\_subnet\_facts - openstack: os\_user - openstack: os\_user\_group - openstack: os\_volume - openvswitch\_db. 
- osx\_defaults - pagerduty\_alert - pam\_limits - pear - profitbricks: profitbricks - profitbricks: profitbricks\_datacenter - profitbricks: profitbricks\_nic - profitbricks: profitbricks\_volume - profitbricks: profitbricks\_volume\_attachments - profitbricks: profitbricks\_snapshot - proxmox: proxmox - proxmox: proxmox\_template - puppet - pushover - pushbullet - rax: rax\_clb\_ssl - rax: rax\_mon\_alarm - rax: rax\_mon\_check - rax: rax\_mon\_entity - rax: rax\_mon\_notification - rax: rax\_mon\_notification\_plan - rabbitmq\_binding - rabbitmq\_exchange - rabbitmq\_queue - selinux\_permissive - sendgrid - sensu\_check - sensu\_subscription - seport - slackpkg - solaris\_zone - taiga\_issue - vertica\_configuration - vertica\_facts - vertica\_role - vertica\_schema - vertica\_user - vmware: vca\_fw - vmware: vca\_nat - vmware: vmware\_cluster - vmware: vmware\_datacenter - vmware: vmware\_dns\_config - vmware: vmware\_dvs\_host - vmware: vmware\_dvs\_portgroup - vmware: vmware\_dvswitch - vmware: vmware\_host - vmware: vmware\_migrate\_vmk - vmware: vmware\_portgroup - vmware: vmware\_target\_canonical\_facts - vmware: vmware\_vm\_facts - vmware: vmware\_vm\_vss\_dvs\_migrate - vmware: vmware\_vmkernel - vmware: vmware\_vmkernel\_ip\_config - vmware: vmware\_vsan\_cluster - vmware: vmware\_vswitch - vmware: vsphere\_copy - webfaction\_app - webfaction\_db - webfaction\_domain - webfaction\_mailbox - webfaction\_site - win\_acl - win\_dotnet\_ngen - win\_environment - win\_firewall\_rule - win\_iis\_virtualdirectory - win\_iis\_webapplication - win\_iis\_webapppool - win\_iis\_webbinding - win\_iis\_website - win\_lineinfile - win\_nssm - win\_package - win\_regedit - win\_scheduled\_task - win\_unzip - win\_updates - win\_webpicmd - xenserver\_facts - zabbix\_host - zabbix\_hostmacro - zabbix\_screen - znode New Inventory scripts: ^^^^^^^^^^^^^^^^^^^^^^ - cloudstack - fleetctl - openvz - nagios\_ndo - nsot - proxmox - rudder - serf New Lookups: ^^^^^^^^^^^^ - credstash - hashi\_vault - ini - shelvefile New Filters: ^^^^^^^^^^^^ - combine New Connection: ^^^^^^^^^^^^^^^ - docker: for talking to docker containers on the ansible controller machine without using ssh. New Callbacks: ^^^^^^^^^^^^^^ - logentries: plugin to send play data to logentries service - skippy: same as default but does not display skip messages Minor changes: ~~~~~~~~~~~~~~ - Many more tests. The new API makes things more testable and we took advantage of it. - big\_ip modules now support turning off ssl certificate validation (use only for self-signed certificates). - Consolidated code from modules using urllib2 to normalize features, TLS and SNI support. - synchronize module's dest\_port parameter now takes precedence over the ansible\_ssh\_port inventory setting. - Play output is now dynamically sized to the terminal with a minimum of 80 columns (the old default). - vars\_prompt and pause are now skipped with a warning if the play is called noninteractively (i.e. pull from cron). - Support for OpenBSD's 'doas' privilege escalation method. - Most vault operations can now be done over multiple files. - ansible-vault encrypt/decrypt read from stdin if no other input file is given, and can write to a given ``--output file`` (including stdout, '-'). This lets you avoid ever writing sensitive plaintext to disk. - ansible-vault rekey accepts the --new-vault-password-file option. - ansible-vault now preserves file permissions on edit and rekey and defaults to restrictive permissions for other options.
- Configuration items defined as paths (local only) now all support shell style interpolations. - Many fixes and new options added to modules, too many to list here. - Now you can see the task file and line number when using verbosity of 3 or above. - The ``[x-y]`` host range syntax is no longer supported. Note that ``[0:1]`` matches two hosts, i.e. the range is inclusive of its endpoints. - We now recommend the use of ``pattern1,pattern2`` to combine host matching patterns. - The use of ':' as a separator conflicts with IPv6 addresses and host ranges. It will be deprecated in the future. - The undocumented use of ';' as a separator is now deprecated. - modules and callbacks have been extended to support no\_log to avoid data disclosure. - new managed\_syslog option has been added to control output to syslog on managed machines; no\_log supersedes this setting. - Lookup, vars and action plugin pathing has been normalized; all now follow the same sequence to find relative files. - We do not ignore the explicitly set login user for ssh when it matches the 'current user' anymore; this allows overriding .ssh/config when it is set explicitly. Leaving it unset will still use the same user and respect .ssh/config. This also means ansible\_ssh\_user can now return a None value. - environment variables passed to remote shells now default to 'controller' settings, with fallback to en\_US.UTF8, which was the previous default. - add\_host is much stricter about host names and will prevent invalid names from being added. - ansible-pull now defaults to doing shallow checkouts with git; use ``--full`` to return to previous behaviour. - random cows are more random - when: now gets the registered var after the first iteration, making it possible to break out of item loops - Handling of undefined variables has changed. In most places they will now raise an error instead of silently injecting an empty string. Use the default filter if you want to approximate the old behaviour: ::

    - debug: msg="The error message was: {{error_code |default('') }}"

- The yum module's detection of installed packages has been made more robust by using /usr/bin/rpm in cases where it would have used repoquery before. - The pip module now properly reports changes when packages are coming from a VCS. - Fixes for retrieving files over https when a CONNECT-only proxy is in the middle. ansible-2.5.1/changelogs/CHANGELOG-v2.1.rst0000644000000000000000000003424513265756155017740 0ustar rootroot00000000000000===================================================== Ansible 2.1 "The Song Remains the Same" Release Notes ===================================================== 2.1.6 "The Song Remains the Same" - 06-01-2017 ---------------------------------------------- - Security fix for CVE-2017-7481 - data for lookup plugins used as variables was not being correctly marked as "unsafe". 2.1.5 "The Song Remains the Same" - 03-27-2017 ---------------------------------------------- - Continued security fix for CVE-2016-9587 - Handle some additional corner cases in the way conditionals are parsed and evaluated. 2.1.4 "The Song Remains the Same" - 2017-01-16 ---------------------------------------------- - Security fix for CVE-2016-9587 - An attacker with control over a client system being managed by Ansible and the ability to send facts back to the Ansible server could use this flaw to execute arbitrary code on the Ansible server as the user and group Ansible is running as.
- Fixed a bug with conditionals in loops, so that undefined variables and other errors now defer raising the error until the conditional has been evaluated. - Added a version check for jinja2-2.9, which does not fully work with Ansible currently. 2.1.3 "The Song Remains the Same" - 2016-11-04 ---------------------------------------------- - Security fix for CVE-2016-8628 - Command injection by compromised server via fact variables. In some situations, facts returned by modules could overwrite connection-based facts or some other special variables, leading to injected commands running on the Ansible controller as the user running Ansible (or via escalated permissions). - Security fix for CVE-2016-8614 - apt\_key module not properly validating keys in some situations. Minor Changes: ~~~~~~~~~~~~~~ - The subversion module from core now marks its password parameter as no\_log so the password is obscured when logging. - The postgresql\_lang and postgresql\_ext modules from extras now mark login\_password as no\_log so the password is obscured when logging. - Fixed several bugs related to locating files relative to role/playbook directories. - Fixed a bug in the way hosts were tested for failed states, resulting in incorrectly skipped block sections. - Fixed a bug in the way our custom JSON encoder is used for the ``to_json*`` filters. - Fixed some bugs related to the use of non-ascii characters in become passwords. - Fixed a bug with Azure modules which may be using the latest rc6 library. - Backported some docker\_common fixes. 2.1.2 "The Song Remains the Same" - 2016-09-29 ---------------------------------------------- Minor Changes ~~~~~~~~~~~~~ - Fixed a bug related to creation of retry files (#17456) - Fixed a bug in the way include params are used when an include task is dynamic (#17064) - Fixed a bug related to including blocks in an include task (#15963) - Fixed a bug related to the use of hostvars internally when creating the connection plugin. This prevents things like variables using lookups from being evaluated unnecessarily (#17024) - Fixed a bug where using a variable containing a list for the ``hosts`` of a play resulted in a list of lists (#16583) - Fixed a bug where integer values would cause an error if a module param was of type ``float`` (no issue) - Fixed a bug with net\_template failing if src was not specified (#17726) - Fixed a bug in "ansible-galaxy import" (#17417) - Fixed a bug in which INI files incorrectly treated a hosts range as a section header (#15331) - Fixed a bug in which the max\_fail\_percentage calculation erroneously caused a series of plays to stop executing (#15954) - Fixed a bug in which the task names were not properly templated (#16295) - Fixed a bug causing "squashed" loops (ie. yum, apt) to incorrectly report results (ansible-modules-core#4214) - Fixed several bugs related to includes: - when including statically, make sure that all parents were also included statically (issue #16990) - properly resolve nested static include paths - print a message when a file is statically included - Fixed a bug in which module params expected to be float types were not converted from integers (only strings) (#17325) - Fixed a bug introduced by static includes in 2.1, which prevented notifications from going to the "top level" handler name. - Fixed a bug where a group\_vars or host\_vars directory in the current working directory would be used (and would take precedence) over those in the inventory and/or playbook directory.
- Fixed a bug which could occur when the result of an async task did not parse as valid JSON. - (re)-allowed the use of ansible\_python\_interpreter lines with more than one argument. - Fixed several bugs related to the creation of the implicit localhost in inventory. - Fixed a bug related to an unspecified number of retries when using until. - Fixed a race-condition bug when creating temp directories before the worker process is forked. - Fix a bug with async's poll keyword not making use of ansible\_python\_interpreter to run (and thus breaking when /usr/bin/python is not present on the remote machine). - Fix a bug where hosts that started with a range in inventory were being treated as an invalid section header. Module fixes: \* Fixed a bug where the temporary CA files created by the module helper code were not being deleted properly in some situations (#17073) \* Fixed many bugs in the unarchive module \* Fixes for module ec2: - Fixed a bug related to source\_dest\_check when used with non-vpc instances (core#3243) - Fixed a bug in ec2 where instances were not powering off when referenced via tags only (core#4765) - Fixed a bug where instances with multiple interfaces were not powering up/down correctly (core#3234) \* Fixes for module get\_url: - Fixed a bug in get\_url module to force a download if there is a checksum mismatch regardless of the last modified time (core#4262) - Fixed a bug in get\_url module to properly process FTP results (core#3661 and core#4601) \* Fixed a bug in win\_user related to users with disabled accounts/expired passwords (core#4369) \* ini\_file: - Fixed a bug so that option lines are now inserted before blank lines. - Fixed a bug where leading whitespace prevented matches on options. \* Fixed a bug in iam\_cert when dup\_ok is used as a string. \* Fixed a bug in postgresql\_db related to the changed logic when state=absent. \* Fixed a bug where single\_transaction and quick were not passed into db\_dump for the mysql\_db module. \* Fixed a bug where the fetch module was not idempotent when retrieving the target of a symlink. \* Many minor fixes for bugs in extras modules. Deprecations ~~~~~~~~~~~~ - Deprecated the use of ``_fixup_perms``. Use ``_fixup_perms2`` instead. This change only impacts custom action plugins using ``_fixup_perms``. Incompatible Changes ~~~~~~~~~~~~~~~~~~~~ - Use of ``_fixup_perms`` with ``recursive=True`` (the default) is no longer supported. Custom action plugins using ``_fixup_perms`` will require changes unless they already use ``recursive=False``. Use ``_fixup_perms2`` if support for previous releases is not required. Otherwise use ``_fixup_perms`` with ``recursive=False``. 2.1 "The Song Remains the Same" ------------------------------- Major Changes: ~~~~~~~~~~~~~~ - Official support for the networking modules, originally available in 2.0 as a tech preview. - Refactored and expanded support for Docker with new modules and many improvements to existing modules, as well as a new Kubernetes module. - Added new modules for Azure (see below for the full list) - Added the ability to specify includes as "static" (either through a configuration option or on a per-include basis). When includes are static, they are loaded at compile time and cannot contain dynamic features like loops. - Added a new strategy ``debug``, which allows per-task debugging of playbooks; for more details see https://docs.ansible.com/ansible/playbooks\_debugger.html - Added a new option for tasks: ``loop_control``.
This currently only supports one option - ``loop_var``, which allows a different loop variable from ``item`` to be used (see the example after this list). - Added the ability to filter facts returned by the fact gathering setup step using the ``gather_subset`` option on the play or in the ansible.cfg configuration file. See http://docs.ansible.com/ansible/intro\_configuration.html#gathering for details on the format of the option. - Added the ability to send per-item callbacks, rather than a batch update (this more closely resembles the behavior of Ansible 1.x). - Added facility for modules to send back 'diff' for display when ansible is called with --diff; updated several modules to return this info - Added ansible-console tool, a REPL shell that allows running adhoc tasks against a chosen inventory (based on https://github.com/dominis/ansible-shell) - Added two new variables, which are set when the ``rescue`` portion of a ``block`` is started: - ``ansible_failed_task``, which contains the serialized version of the failed task. - ``ansible_failed_result``, which contains the result of the failed task. - New meta action, ``meta: clear_host_errors``, which will clear any hosts which were marked as failed (but not unreachable hosts). - New meta action, ``meta: clear_facts``, which will remove existing facts for the current host from current memory and facts cache. - copy module can now transparently use a vaulted file as source; if vault passwords were provided, it will decrypt and copy on the fly. - The way new-style python modules (which include all of the non-windows modules shipped with Ansible) are assembled before execution on the remote machine has been changed. The new way stays closer to how python imports modules, which will make it easier to write modules which rely heavily on shared code. - Reduce the situations in which a module can end up as world readable. For details, see: https://docs.ansible.com/ansible/become.html#becoming-an-unprivileged-user - Re-implemented the retry file feature, which had been left out of 2.0 (fix was backported to 2.0.1 originally). - Improved winrm argument validation and feature sniffing (for upcoming pywinrm NTLM support). - Improved winrm error handling: basic parsing of stderr from CLIXML stream.
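A minimal sketch of the ``loop_control``/``loop_var`` option referenced in the list above (task, message and item names are illustrative, not from the changelog):

.. code:: yaml

    # use 'server' instead of the default 'item' as the loop variable
    - name: configure each server
      debug:
        msg: "configuring {{ server }}"
      with_items:
        - web01
        - web02
      loop_control:
        loop_var: server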
New Modules ^^^^^^^^^^^ - aws - ec2\_vol\_facts - ec2\_vpc\_dhcp\_options - ec2\_vpc\_net\_facts - ec2\_snapshot\_facts - azure: - azure\_rm\_deployment - azure\_rm\_networkinterface - azure\_rm\_networkinterface\_facts (TECH PREVIEW) - azure\_rm\_publicipaddress - azure\_rm\_publicipaddress\_facts (TECH PREVIEW) - azure\_rm\_resourcegroup - azure\_rm\_resourcegroup\_facts (TECH PREVIEW) - azure\_rm\_securitygroup - azure\_rm\_securitygroup\_facts (TECH PREVIEW) - azure\_rm\_storageaccount - azure\_rm\_storageaccount\_facts (TECH PREVIEW) - azure\_rm\_storageblob - azure\_rm\_subnet - azure\_rm\_virtualmachine - azure\_rm\_virtualmachineimage\_facts (TECH PREVIEW) - azure\_rm\_virtualnetwork - azure\_rm\_virtualnetwork\_facts (TECH PREVIEW) - cloudflare\_dns - cloudstack - cs\_cluster - cs\_configuration - cs\_instance\_facts - cs\_pod - cs\_resourcelimit - cs\_volume - cs\_zone - cs\_zone\_facts - clustering - kubernetes - cumulus - cl\_bond - cl\_bridge - cl\_img\_install - cl\_interface - cl\_interface\_policy - cl\_license - cl\_ports - eos - eos\_command - eos\_config - eos\_eapi - eos\_template - gitlab - gitlab\_group - gitlab\_project - gitlab\_user - ios - ios\_command - ios\_config - ios\_template - iosxr - iosxr\_command - iosxr\_config - iosxr\_template - junos - junos\_command - junos\_config - junos\_facts - junos\_netconf - junos\_package - junos\_template - make - mongodb\_parameter - nxos - nxos\_command - nxos\_config - nxos\_facts - nxos\_feature - nxos\_interface - nxos\_ip\_interface - nxos\_nxapi - nxos\_ping - nxos\_switchport - nxos\_template - nxos\_vlan - nxos\_vrf - nxos\_vrf\_interface - nxos\_vrrp - openstack - os\_flavor\_facts - os\_group - os\_ironic\_inspect - os\_keystone\_domain\_facts - os\_keystone\_role - os\_port\_facts - os\_project\_facts - os\_user\_facts - os\_user\_role - openswitch - ops\_command - ops\_config - ops\_facts - ops\_template - softlayer - sl\_vm - vmware - vmware\_maintenancemode - vmware\_vm\_shell - windows - win\_acl\_inheritance - win\_owner - win\_reboot - win\_regmerge - win\_timezone - yum\_repository New Strategies ^^^^^^^^^^^^^^ - debug New Filters ^^^^^^^^^^^ - extract - ip4\_hex - regex\_search - regex\_findall New Callbacks ^^^^^^^^^^^^^ - actionable (only shows changed and failed) - slack - json New Tests ^^^^^^^^^ - issubset - issuperset New Inventory scripts: ^^^^^^^^^^^^^^^^^^^^^^ - brook - rackhd - azure\_rm Minor Changes: ~~~~~~~~~~~~~~ - Added support for pipelining mode to more connection plugins, which helps prevent module data from being written to disk. - Added a new '!unsafe' YAML decorator, which can be used in playbooks to ensure a string is not templated. For example: ``foo: !unsafe "Don't template {{me}}"``. - Callbacks now have access to the options with which the CLI was called - Debug now has a verbosity option to control when to display, matching the number of -v flags on the command line - Modules now get verbosity, diff and other flags as passed to ansible - Mount facts now also show 'network mounts' that use the pattern ``:/`` - Plugins are now sorted before loading. This means, for instance, if you want two custom callback plugins to run in a certain order you can name them 10-first-callback.py and 20-second-callback.py. - Added (alpha) Centrify's dzdo as another become method (privilege escalation) Deprecations: ~~~~~~~~~~~~~ - Deprecated the use of "bare" variables in loops (ie. ``with_items: foo``, where ``foo`` is a variable). The full jinja2 variable syntax of ``{{foo}}`` should always be used instead.
This warning will be removed completely in 2.3, after which time it will be an error. - play\_hosts magic variable; use ansible\_play\_batch or ansible\_play\_hosts instead. ansible-2.5.1/changelogs/CHANGELOG-v2.2.rst0000644000000000000000000004125013265756155017733 0ustar rootroot00000000000000================================================== Ansible 2.2 "The Battle of Evermore" Release Notes ================================================== 2.2.4 "The Battle of Evermore" - TBD ------------------------------------ - avoid vault view writing to logs - moved htpasswd module to use LooseVersion vs StrictVersion to make it usable on Debian - fix for adhoc not obeying callback options 2.2.3 "The Battle of Evermore" - 05-09-2017 ------------------------------------------- Major Changes: ~~~~~~~~~~~~~~ - [SECURITY] (HIGH): fix for CVE-2017-7466, which was caused by an incomplete cherry-picking of commits related to CVE-2016-9587. This can lead to some jinja2 syntax not being stripped out of templated results. - [SECURITY] (MODERATE): fix for CVE-2017-7481, in which data for lookup plugins used as variables was not being correctly marked as "unsafe". Minor Changes: ~~~~~~~~~~~~~~ - Fixes a bug when using YAML inventory where hosts were not put in the 'all' group, and some other 'ungrouped' issues in inventory. - Fixes a bug when using ansible commands without a tty for stdin. - Split on newlines when searching for become prompt. - Fix crash upon password prompt in py3 when using the paramiko connection type. 2.2.2 "The Battle of Evermore" - 03-27-2017 ------------------------------------------- Major Changes: ~~~~~~~~~~~~~~ - [SECURITY] (HIGH): (continued fix for CVE-2016-9587) Handle some additional corner cases in the way conditionals are parsed and evaluated. - [SECURITY] (LOW): properly filter passwords out of URLs when displaying output from some modules. Minor Changes: ~~~~~~~~~~~~~~ - Fix azure\_rm version checks (#22270). - Fix for traceback when we encounter non-utf8 characters when using --diff. - Ensure ssh hostkey checks respect server port. - Use proper PyYAML classes for safe loading YAML files. - Fix for a bug related to ``when`` statements with older jinja2 versions. - Fix a bug/traceback when using to\_yaml/to\_nice\_yaml. - Properly clean data of jinja2-like syntax, even if that data came from an unsafe source. - Fix bug regarding undefined entries in HostVars. - Skip fact gathering if the entire play was included via conditional which evaluates to False. - Fixed a performance regression when using a large number of items in a with loop. - Fixed a bug in the way the end of role was detected, which in some cases could cause a role to be run more than once. - Add jinja2 groupby filter override to cast namedtuple to tuple to handle a non-compatible change in jinja2 2.9.4-2.9.5. - Fixed several bugs related to temp directory creation on remote systems when using shell expansions and become privilege escalation. - Fixed a bug related to splitting/parsing the output of a become privilege escalation when looking for a password prompt. - Several unicode/bytes fixes. 2.2.1 "The Battle of Evermore" - 01-16-2017 ------------------------------------------- Major Changes: ~~~~~~~~~~~~~~ - Security fix for CVE-2016-9587 - An attacker with control over a client system being managed by Ansible and the ability to send facts back to the Ansible server could use this flaw to execute arbitrary code on the Ansible server as the user and group Ansible is running as.
Minor Changes ~~~~~~~~~~~~~ - Fixes a bug where undefined variables in with\_\* loops would cause a task failure even if the when condition would cause the task to be skipped. - Fixed a bug related to roles where in certain situations a role may be run more than once despite not allowing duplicates. - Fixed some additional bugs related to atomic\_move for modules. - Fixes multiple bugs related to field/attribute inheritance in nested blocks and includes, as well as task iteration logic during failures. - Fixed pip installing packages into virtualenvs using the system pip instead of the virtualenv pip. - Fixed dnf on systems with dnf-2.0.x (some changes in the API). - Fixed traceback with dnf install of groups. - Fixes a bug in which include\_vars was not working with failed\_when. - Fix for include\_vars only loading files with .yml, .yaml, and .json extensions. This was only supposed to apply to loading a directory of vars files. - Fixes several bugs related to properly incrementing the failed count in the host statistics. - Fixes a bug with listening handlers which did not specify a ``name`` field. - Fixes a bug with the ``play_hosts`` internal variable, so that it properly reflects the current list of hosts. - Fixes a bug related to the v2\_playbook\_on\_start callback method and legacy (v1) plugins. - Fixes an openssh related process exit race condition, related to the fact that connections using ControlPersist do not close stderr. - Improvements and fixes to OpenBSD fact gathering. - Updated ``make deb`` to use pbuilder. Use ``make local_deb`` for the previous non-pbuilder build. - Fixed Windows async to avoid blocking due to handle inheritance. - Fixed bugs in the mount module on older Linux kernels and \*BSDs - Fix regression in jinja2 include search path. - Various minor fixes for Python 3 - Inserted some checks for jinja2-2.9, which can cause some issues with Ansible currently. 2.2 "The Battle of Evermore" - 11-01-2016 ----------------------------------------- Major Changes: ~~~~~~~~~~~~~~ - Security fix for CVE-2016-8628 - Command injection by compromised server via fact variables. In some situations, facts returned by modules could overwrite connection-based facts or some other special variables, leading to injected commands running on the Ansible controller as the user running Ansible (or via escalated permissions). - Security fix for CVE-2016-8614 - apt\_key module not properly validating keys in some situations. - Added the ``listen`` feature for modules. This feature allows tasks to more easily notify multiple handlers, as well as making it easier for handlers from decoupled roles to be notified. - Major performance improvements. - Added support for binary modules - Added the ability to specify serial batches as a list (``serial: [1, 5, 10]``), which allows for so-called "canary" actions in one play. - Fixed 'local type' plugins and actions to have a more predictable relative path. Fixes a regression of 1.9 (PR #16805). Existing users of 2.x will need to adjust related tasks. - ``meta`` tasks can now use conditionals. - ``raw`` now returns ``changed: true`` to be consistent with shell/command/script modules. Add ``changed_when: false`` to ``raw`` tasks to restore the pre-2.2 behavior if necessary. - New privilege escalation become method ``ksu`` - Windows ``async:`` support for long-running or background tasks. - Windows ``environment:`` support for setting module environment vars in play/task. 
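A minimal sketch of the Windows ``environment:`` support described in the entry above (the variable name and value are illustrative); ``win_shell`` is one of the Windows modules added in this release:

.. code:: yaml

    - name: read a module environment var on a Windows host
      win_shell: Write-Output $env:MY_VAR
      environment:
        MY_VAR: some_value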
- Added a new ``meta`` option: ``end_play``, which can be used to skip to the end of a play. - roles can now be included in the middle of a task list via the new ``include_role`` module; this also allows making the role import 'loopable' and/or conditional (see the sketch after this list). - The service module has been changed to use system-specific modules if they exist and fall back to the old service module if they cannot be found or detected. - Add ability to specify what ssh client binary to use on the controller. This can be configured via ssh\_executable in the ansible config file or by setting ansible\_ssh\_executable as an inventory variable if different ones are needed for different hosts. - Windows: - several facts were modified or renamed for consistency with their Unix counterparts, and many new facts were added. If your playbooks rely on any of the following keys, please ensure they are using the correct key names and/or values: - ansible\_date\_time.date (changed to use yyyy-mm-dd format instead of default system-locale format) - ansible\_date\_time.iso8601 (changed to UTC instead of local time) - ansible\_distribution (now uses OS caption string, e.g.: "Microsoft Windows Server 2012 R2 Standard"; version is still available on ansible\_distribution\_version) - ansible\_totalmem (renamed to ansible\_memtotal\_mb, units changed to MB instead of bytes) - ``async:`` support for long-running or background tasks. - ``environment:`` support for setting module environment vars in play/task. - Tech Preview: Work has been done to get Ansible running under Python3. This work is not complete enough to depend upon in production environments but it is enough to begin testing it. - Most of the controller side should now work. Users should be able to run python3 /usr/bin/ansible and python3 /usr/bin/ansible-playbook and have core features of ansible work. - A few of the most essential modules have been audited and are known to work. Others work out of the box. - We are using unit and integration tests to help us port code and not regress later. Even if you are not familiar with python you can still help by contributing integration tests (just ansible roles) that exercise more of the code to make sure it continues to run on both Python2 and Python3. - scp\_if\_ssh now supports True, False and "smart". "smart" is the default and will retry failed sftp transfers with scp.
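A minimal sketch of mid-play role inclusion via ``include_role``, as referenced in the list above; the role name and condition are hypothetical:

.. code:: yaml

    # pull a role into the middle of a task list, conditionally
    - name: apply a role in the middle of a task list
      include_role:
        name: myrole
      when: ansible_os_family == 'Debian'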
- Network: - Refactored all network modules to remove duplicate code and take advantage of Ansiballz implementation - All functionality from \*\_template network modules has been combined into the \*\_config module - Network \*\_command modules no longer allow configuration mode statements New Modules ^^^^^^^^^^^ - apache2\_mod\_proxy - asa - asa\_acl - asa\_command - asa\_config - atomic - atomic\_host - atomic\_image - aws - cloudformation\_facts - ec2\_asg\_facts - ec2\_customer\_gateway - ec2\_lc\_find - ec2\_vpc\_dhcp\_options\_facts - ec2\_vpc\_nacl - ec2\_vpc\_nacl\_facts - ec2\_vpc\_nat\_gateway - ec2\_vpc\_peer - ec2\_vpc\_vgw - efs - efs\_facts - execute\_lambda - iam\_mfa\_device\_facts - iam\_server\_certificate\_facts - kinesis\_stream - lambda - lambda\_alias - lambda\_event - lambda\_facts - redshift - redshift\_subnet\_group - s3\_website - sts\_session\_token - cloudstack - cs\_router - cs\_snapshot\_policy - dellos6 - dellos6\_command - dellos6\_config - dellos6\_facts - dellos9 - dellos9\_command - dellos9\_config - dellos9\_facts - dellos10 - dellos10\_command - dellos10\_config - dellos10\_facts - digital\_ocean\_block\_storage - docker - docker\_network - eos - eos\_facts - exoscale: - exo\_dns\_domain - exo\_dns\_record - f5: - bigip\_device\_dns - bigip\_device\_ntp - bigip\_device\_sshd - bigip\_gtm\_datacenter - bigip\_gtm\_virtual\_server - bigip\_irule - bigip\_routedomain - bigip\_selfip - bigip\_ssl\_certificate - bigip\_sys\_db - bigip\_vlan - github - github\_key - github\_release - google - gcdns\_record - gcdns\_zone - gce\_mig - honeybadger\_deployment - illumos - dladm\_etherstub - dladm\_vnic - flowadm - ipadm\_if - ipadm\_prop - ipmi - ipmi\_boot - ipmi\_power - ios - ios\_facts - iosxr - iosxr\_facts - include\_role - jenkins - jenkins\_job - jenkins\_plugin - kibana\_plugin - letsencrypt - logicmonitor - logicmonitor\_facts - lxd - lxd\_profile - lxd\_container - netapp - netapp\_e\_amg - netapp\_e\_amg\_role - netapp\_e\_amg\_sync - netapp\_e\_auth - netapp\_e\_facts - netapp\_e\_flashcache - netapp\_e\_hostgroup - netapp\_e\_host - netapp\_e\_lun\_mapping - netapp\_e\_snapshot\_group - netapp\_e\_snapshot\_images - netapp\_e\_snapshot\_volume - netapp\_e\_storage\_system - netapp\_e\_storagepool - netapp\_e\_volume - netapp\_e\_volume\_copy - netconf\_config - netvisor - pn\_cluster - pn\_ospfarea - pn\_ospf - pn\_show - pn\_trunk - pn\_vlag - pn\_vlan - pn\_vrouterbgp - pn\_vrouterif - pn\_vrouterlbif - pn\_vrouter - nxos - nxos\_aaa\_server\_host - nxos\_aaa\_server - nxos\_acl\_interface - nxos\_acl - nxos\_bgp\_af - nxos\_bgp\_neighbor\_af - nxos\_bgp\_neighbor - nxos\_bgp - nxos\_evpn\_global - nxos\_evpn\_vni - nxos\_file\_copy - nxos\_gir\_profile\_management - nxos\_gir - nxos\_hsrp - nxos\_igmp\_interface - nxos\_igmp - nxos\_igmp\_snooping - nxos\_install\_os - nxos\_interface\_ospf - nxos\_mtu - nxos\_ntp\_auth - nxos\_ntp\_options - nxos\_ntp - nxos\_ospf - nxos\_ospf\_vrf - nxos\_overlay\_global - nxos\_pim\_interface - nxos\_pim - nxos\_pim\_rp\_address - nxos\_portchannel - nxos\_rollback - nxos\_smu - nxos\_snapshot - nxos\_snmp\_community - nxos\_snmp\_contact - nxos\_snmp\_host - nxos\_snmp\_location - nxos\_snmp\_traps - nxos\_snmp\_user - nxos\_static\_route - nxos\_udld\_interface - nxos\_udld - nxos\_vpc\_interface - nxos\_vpc - nxos\_vrf\_af - nxos\_vtp\_domain - nxos\_vtp\_password - nxos\_vtp\_version - nxos\_vxlan\_vtep - nxos\_vxlan\_vtep\_vni - mssql\_db - ovh\_ip\_loadbalancing\_backend - opendj\_backendprop - openstack -
os\_keystone\_service - os\_recordset - os\_server\_group - os\_stack - os\_zone - ovirt - ovirt\_auth - ovirt\_disks - ovirt\_vms - rhevm - rocketchat - sefcontext - sensu\_subscription - smartos - smartos\_image\_facts - sros - sros\_command - sros\_config - sros\_rollback - statusio\_maintenance - systemd - telegram - univention - udm\_dns\_record - udm\_dns\_zone - udm\_group - udm\_share - udm\_user - vmware - vmware\_guest - vmware\_local\_user\_manager - vmware\_vmotion - vyos - vyos\_command - vyos\_config - vyos\_facts - wakeonlan - windows - win\_command - win\_robocopy - win\_shell New Callbacks ^^^^^^^^^^^^^ - foreman Minor Changes ~~~~~~~~~~~~~ - now -vvv shows the exact path from which the 'currently executing module' was picked up. - loop\_control now has a label option to allow fine-grained control over what gets displayed per item - loop\_control now has a pause option to allow pausing for N seconds between loop iterations of a task. - New privilege escalation become method ``ksu`` - ``raw`` now returns ``changed: true`` to be consistent with shell/command/script modules. Add ``changed_when: false`` to ``raw`` tasks to restore the pre-2.2 behavior if necessary. - removed previously deprecated ';' as host list separator. - Only check if the default ssh client supports ControlPersist once instead of once for each host + task combination. - Fix a problem with the pip module updating the python pip package itself. - ansible\_play\_hosts is a new magic variable to provide a list of hosts in scope for the current play. Unlike play\_hosts it is not subject to the 'serial' keyword. - ansible\_play\_batch is a new magic variable meant to substitute for the current play\_hosts. - The subversion module from core now marks its password parameter as no\_log so the password is obscured when logging. - The postgresql\_lang and postgresql\_ext modules from extras now mark login\_password as no\_log so the password is obscured when logging. - Fix for yum module incorrectly thinking it succeeded in installing packages - Make the default ansible\_managed template string into a static string since all of the replaceable values lead to non-idempotent behaviour. For custom front ends using the API: ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - ansible.parsing.vault: - VaultLib.is\_encrypted() has been deprecated. It will be removed in 2.4. Use ansible.parsing.vault.is\_encrypted() instead - VaultFile has been removed. This unfinished code was never used inside of Ansible. The feature it was intended to support has now been implemented without using this. - VaultAES, the older, insecure encrypted format that debuted in Ansible-1.5 and was replaced by VaultAES256 less than a week later, now has a deprecation warning. **It will be removed in 2.3**. In the unlikely event that you wrote a vault file in that 1 week window and have never modified the file since (ansible-vault automatically re-encrypts the file using VaultAES256 whenever it is written to but not read), run ``ansible-vault rekey [filename]`` to move to VaultAES256. Removed Deprecated ~~~~~~~~~~~~~~~~~~ - ';' as host list separator. - with\_ 'bare variable' handling; now loop items must always be templated ``{{ }}`` or they will be considered as plain strings. - skipping task on 'missing attribute' in loop variable; now in a loop an undefined attribute will return an error instead of skipping the task. - skipping on undefined variables in loop; now loops will have to define a variable or use ``|default`` to avoid errors.
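A small sketch of the ``|default`` pattern mentioned in the last entry above (the variable name is illustrative): a loop over a possibly-undefined variable now needs an explicit default instead of being silently skipped:

.. code:: yaml

    - debug:
        msg: "{{ item }}"
      # fall back to an empty list if the variable is undefined
      with_items: "{{ maybe_undefined_list | default([]) }}"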
Deprecations ~~~~~~~~~~~~ Notice given that the following will be removed in Ansible 2.4: \* Modules \* eos\_template \* ios\_template \* iosxr\_template \* junos\_template \* nxos\_template \* ops\_template ansible-2.5.1/changelogs/CHANGELOG-v2.3.rst0000644000000000000000000004414413265756155017741 0ustar rootroot00000000000000===================================== Ansible 2.3 "Ramble On" Release Notes ===================================== 2.3.4 "Ramble On" - TBD ----------------------- - Flush stdin when passing the become password. Fixes some cases of timeout on Python3 with the ssh connection plugin: https://github.com/ansible/ansible/pull/35049 Bugfixes ~~~~~~~~ - Fix setting of environment in a task that uses a loop: https://github.com/ansible/ansible/issues/32685 - Fix https retrieval with TLSv1.2: https://github.com/ansible/ansible/pull/32053 2.3.3 "Ramble On" - TBD ----------------------- Bugfixes ~~~~~~~~ - Security fix for CVE-2017-7550 - the jenkins\_plugin module was logging the jenkins server password if the url\_password was passed via the params field: https://github.com/ansible/ansible/pull/30875 - Fix alternatives module handling of non-existing options - Fix synchronize traceback with the docker connection plugin - Do not escape backslashes in the template lookup plugin to mirror what the template module does - Fix the expires option of the postgresql\_user module - Fix for win\_acl when setting permissions on registry objects that use ``ALL APPLICATION PACKAGES`` and ``ALL RESTRICTED APPLICATION PACKAGES`` - Python3 fixes - assorted azure modules - pause module - hacking/env-setup script - Fix traceback when checking for passwords in logged strings when logging executed commands. - docker\_login module - Workaround python-libselinux API change in the seboolean module - digital\_ocean\_tag module - Fix the zip filter - Fix user module combining bytes and text - Fix for security groups in the amazon efs module - Fix for the jail connection plugin not finding the named jail - Fix for blockinfile's parameters insertbefore and insertafter - ios\_config: Fix traceback when the defaults parameter is not set - iosxr\_config: Fixed unicode error when UTF-8 characters are in configs - Fix check mode in archive module - Fix UnboundLocalError in check mode in cs\_role module - Fix to always use lowercase hostnames for host keys in known\_hosts module - Added missing return results for win\_stat - Fix rabbitmq modules to give a helpful error if requests is not installed - Fix yum module not deleting rpms that it downloaded - Fix yum module failing with a URL to an rpm - Fix file module inappropriately expanding literal dollar signs in a path read from the filesystem as an environment variable. - Fix the ssh "smart" transport setting which automatically selects the best means of transferring files over ssh (sftp, ssh, piped). - Fix authentication by api\_key parameter in exoscale modules.
- vmware module\_utils shared code ssl/validate\_certs fixes in connection logic - allow 'bridge' facts to work for certain containers that create conflicting ones with connection plugins - Fix for win\_get\_url to use TLS 1.2/1.1 if it is available on the host - Fix for the filetree lookup with non-ascii group names - Better message for invalid keywords/options in task due to undefined expressions - Fixed check mode for enable on Solaris for service module - Fix cloudtrail module to allow AWS profiles other than the default - Fix an encoding issue with secret (password) vars\_prompts - Fix for Windows become to show the stdout and stderr strings on a failure - Fix the issue where SSL verification could not be disabled for Tower modules - Use safe\_load instead of load to read a yaml document - Fix for win\_file to respect check mode when deleting directories - Include\_role now complains about invalid arguments - Added socket conditions to ignore for wait\_for; no need to error when closing an already closed connection - Updated hostname module to work on newer RHEL7 releases - Security fix to avoid provider password leaking in logs for network modules \* Python3 fixes for azure modules 2.3.2 "Ramble On" - 2017-08-04 ------------------------------ Bugfixes ~~~~~~~~ - Fix parted i18n issues - fixed handling of extra vars for tower\_job\_template (#25272) - Python3 bugfixes - Fix sorting of ec2 policies - Fix digital\_ocean dynamic inventory script - Fix for the docker connection plugin - Fix pip module when using python3's pyvenv and python3 -m venv to create virtualenvs - Fix for the AnsiBallZ wrapper so that it gives a better error message when there's not enough disk space to create its tempdir. - Fix so ansible-galaxy install --force with unversioned roles will once again overwrite old versions. - Fix for RabbitMQ 3.6.7 endpoint return code changing.
- Fix for Foreman organization creation - fixed incorrect fail\_json ref in rpm\_key - Corrected required on hash\_name for dynamodb\_table - Fix for fetch action plugin not validating correctly - Avoid vault view writing display to logs - htpasswd: fix passlib module version comparison - Fix for flowdock error message when external\_user\_name is missing - fixed corner case for delegate\_to, loops and delegate\_facts - fixed wait\_for python2.4/2.5 compatibility (this is the last version where this is needed) - fix for adhoc not obeying callback options - fix for win\_find where it fails to recursively scan empty nested directories - fix non-pipelined code paths for Windows (eg, ANSIBLE\_KEEP\_REMOTE\_FILES, non-pipelined connection plugins) - fix for win\_updates where args and check mode were ignored due to common code change - fix for unprivileged users using the Windows runas become method - fix starttls code path for mail module - fix missing LC\_TYPE in parted module - fix CN parsing with OpenSSL 1.1 in letsencrypt module - fix params assignment in jabber module - fix TXT record type handling in exo\_dns\_record module - fix message queue 'message ttl can't be 0' in rabbitmq\_queue module - CloudStack bugfixes: - fix template upload for users in cs\_template module, change default to is\_routing=None - several fixes in cs\_host module; fixes hypervisor handling - fix network param ignored due to a typo in cs\_nic module - fix missing type bool in module cs\_zone - fix KeyError: 'sshkeypair' in cs\_instance module for CloudStack v4.5 and before - fix for win\_chocolatey where trying to upgrade all the packages as per the example docs fails - fix for win\_chocolatey where it did not fail if the version set did not exist - fix for win\_regedit always changing a reg key if the dword value set is a hex - fix for wait\_for on non-Linux systems with newer versions of psutil - fix eos\_banner code and test issues - run setup and teardown of EAPI service only on EAPI tests - fix eos\_config tests so only Eth1 and Eth2 are used - Fix for potential bug when using legacy inventory vars for configuring the su password. - Fix crash in file module when directories contain non-utf8 filenames - Fix for dnf groupinstall with dnf-2.x - Fix seboolean module for incompatibility in newer Python3 selinux bindings - Optimization for inventory; no need to dedup at every stage, it's redundant and slow - Fix fact gathering for package and service action plugins - make random\_choice more error resilient (#27380) - ensure prefix in plugin loading to avoid conflicts - fix for a small number of modules (tempfile, possibly copy) which could fail if the tempdir on the remote box was a symlink 2.3.1 "Ramble On" - 2017-06-01 ------------------------------ Bugfixes ~~~~~~~~ - Security fix for CVE-2017-7481 - data for lookup plugins used as variables was not being correctly marked as "unsafe". - Fix default value of fetch module's validate\_checksum to be True - Added fix for "meta: refresh\_connection" not working with default 'smart' connection.
- Fix template so that the --diff command line option works when the destination is a directory - Fix python3 bugs in pam\_limits - Fix unbound error when using module deprecation as a single string - Several places in which error handling was broken due to bad conversions or just typos - Fix to user module for appending/setting groups on OpenBSD (flags were reversed) - assemble fix to use the safer os.path.join; avoids charset issues - fixed issue with solaris facts and i18n - added python2.4 compatibility fix to sysctl module - Fix comparison of existing container security opts in the docker\_container module - fixed service module invocation of insserv on certain platforms - Fix traceback in os\_user in an error case. - Fix docker container to restart a container when changing to fewer exposed ports - Fix tracebacks in docker\_network - Fixes to detection of updated docker images - Handle detection of docker image changes when published ports is changed - Fix for docker\_container restarting images when links list is empty. 2.3 "Ramble On" - 2017-04-12 ---------------------------- Moving to Ansible 2.3 guide http://docs.ansible.com/ansible/porting\_guide\_2.3.html Major Changes ~~~~~~~~~~~~~ - Documented and renamed the previously released 'single var vaulting' feature, allowing users to use vault encryption for single variables in a normal YAML vars file. - Allow module\_utils for custom modules to be placed in site-specific directories and shipped in roles - On platforms that support it, use a more modern system polling API instead of select in the ssh connection plugin. This removes one limitation on how many parallel forks are feasible on these systems. - Windows/WinRM supports (experimental) become method "runas" to run modules and scripts as a different user, and to transparently access network resources. - The WinRM connection plugin now uses pipelining when executing modules, resulting in significantly faster execution for small tasks. - The WinRM connection plugin can now manage Kerberos tickets automatically when ``ansible_winrm_transport=kerberos`` and ``ansible_user``/``ansible_password`` are specified. - Refactored/standardized most Windows modules, adding check-mode and diff support where possible. - Extended Windows module API with parameter-type support and helper functions (i.e. Expand-Environment, Add-Warning, Add-DeprecationWarning) - restructured how async works to allow it to apply to action plugins that choose to support it. Minor Changes ~~~~~~~~~~~~~ - The version and release facts for OpenBSD hosts were reversed. This has been changed so that version has the numeric portion and release has the name of the release. - removed 'package' from default squash actions as not all package managers support it and it creates errors when using loops; any user can add it back via config options if they don't use those package managers or otherwise avoid the errors. - Blocks can now have a ``name`` field, to aid in playbook readability. - default strategy is now configurable via ansible.cfg or environment variable. - Added 'ansible\_playbook\_python' which contains the 'current python executable'; it can be blank in some cases in which Ansible is not invoked via the standard CLI (sys.executable limitation).
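A hedged sketch of one way 'ansible\_playbook\_python' can be used (the package name and task are illustrative, not from the changelog): point a localhost-delegated task at the same interpreter that is running ansible-playbook:

.. code:: yaml

    - name: run a controller-side task with the playbook's own python
      pip:
        name: boto3
      delegate_to: localhost
      vars:
        # reuse the interpreter running ansible-playbook itself
        ansible_python_interpreter: "{{ ansible_playbook_python }}"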
- Added 'metadata' to modules to enable classification - ansible-doc now displays the path to the module and existing 'metadata' - added optional 'piped' transfer method to ssh plugin for when scp and sftp are missing; the ssh plugin is also now 'smarter' when using these options - default controlpersist path is now a custom hash of host-port-user to avoid the socket path length errors for long hostnames - Various fixes for Python3 compatibility - Fixed issues with inventory formats not handling 'all' and 'ungrouped' in a uniform way. - 'service' tasks can now use async again; we had lost this capability when it was changed into an action plugin. - made any\_errors\_fatal inheritable from play to task and all other objects in between. - many small performance improvements in inventory and variable handling and in task execution. - Added a retry class to the ec2\_asg module since customers were running into throttling errors (AWSRetry is a solution for modules using boto3 which isn't applicable here). Deprecations ~~~~~~~~~~~~ - Specifying --tags (or --skip-tags) multiple times on the command line currently leads to the last one overriding all the previous ones. This behaviour is deprecated. In the future, if you specify --tags multiple times the tags will be merged together. From now on, using --tags multiple times on one command line will emit a deprecation warning. Setting the merge\_multiple\_cli\_tags option to True in the ansible.cfg file will enable the new behaviour. In 2.4, the default will be to merge and you can enable the old overwriting behaviour via the config option. In 2.5, multiple --tags options will be merged with no way to go back to the old behaviour. - Modules (scheduled for removal in 2.5) - ec2\_vpc - cl\_bond - cl\_bridge - cl\_img\_install - cl\_interface - cl\_interface\_policy - cl\_license - cl\_ports - nxos\_mtu, use nxos\_system instead New: Callbacks ^^^^^^^^^^^^^^ - dense: minimal stdout output with fallback to default when verbose New: lookups ^^^^^^^^^^^^ - keyring: allows getting password from the 'controller' system's keyrings New: cache ^^^^^^^^^^ - pickle (uses python's own serializer) - yaml New: inventory scripts ^^^^^^^^^^^^^^^^^^^^^^ - oVirt/RHV New: filters ^^^^^^^^^^^^ - combinations - permutations - zip - zip\_longest Module Notes ~~~~~~~~~~~~ - AWS lambda: previously ignored changes that only affected one parameter. Existing deployments may have outstanding changes that this bugfix will apply. - oVirt/RHV: Added support for 4.1 features and the following: - data centers, clusters, hosts, storage domains and networks management. - hosts and virtual machines affinity groups and labels. - users, groups and permissions management. - Improved management of virtual machines and disks. - Mount: Some fixes so bind mounts are not mounted each time the playbook runs.
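A tiny sketch of the new ``zip`` filter listed under "New: filters" above (the values are illustrative):

.. code:: yaml

    # pairs up two lists element-by-element:
    # [['a', 1], ['b', 2], ['c', 3]]
    - debug:
        msg: "{{ ['a', 'b', 'c'] | zip([1, 2, 3]) | list }}"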
New Modules ~~~~~~~~~~~ - a10\_server\_axapi3 - amazon: - aws\_kms - cloudfront\_facts - ec2\_group\_facts - ec2\_lc\_facts - ec2\_vpc\_igw\_facts - ec2\_vpc\_nat\_gateway\_facts - ec2\_vpc\_vgw\_facts - ecs\_ecr - elasticache\_parameter\_group - elasticache\_snapshot - iam\_role - s3\_sync - archive - beadm - bigswitch: - bigmon\_chain - bigmon\_policy - cloudengine: - ce\_command - cloudscale\_server - cloudstack: - cs\_host - cs\_nic - cs\_region - cs\_role - cs\_vpc - dimensiondata\_network - eos: - eos\_banner - eos\_system - eos\_user - f5: - bigip\_gtm\_facts - bigip\_hostname - bigip\_snat\_pool - bigip\_sys\_global - foreman: - foreman - katello - fortios - fortios\_config - gconftool2 - google: - gce\_eip - gce\_snapshot - gcpubsub - gcpubsub\_facts - hpilo: - hpilo\_boot - hpilo\_facts - hponcfg - icinga2\_feature - illumos: - dladm\_iptun - dladm\_linkprop - dladm\_vlan - ipadm\_addr - ipadm\_addrprop - ipadm\_ifprop - infinidat: - infini\_export - infini\_export\_client - infini\_fs - infini\_host - infini\_pool - infini\_vol - ipa: - ipa\_group - ipa\_hbacrule - ipa\_host - ipa\_hostgroup - ipa\_role - ipa\_sudocmd - ipa\_sudocmdgroup - ipa\_sudorule - ipa\_user - ipinfoio\_facts - ios: - ios\_banner - ios\_system - ios\_vrf - iosxr\_system - iso\_extract - java\_cert - jenkins\_script - ldap: - ldap\_attr - ldap\_entry - logstash\_plugin - mattermost - net\_command - netapp: - sf\_account\_manager - sf\_snapshot\_schedule\_manager - sf\_volume\_manager - sf\_volume\_access\_group\_manager - nginx\_status\_facts - nsupdate - omapi\_host - openssl: - openssl\_privatekey - openssl\_publickey - openstack: - os\_nova\_host\_aggregate - os\_quota - openwrt\_init - ordnance: - ordnance\_config - ordnance\_facts - ovirt: - ovirt\_affinity\_groups - ovirt\_affinity\_labels - ovirt\_affinity\_labels\_facts - ovirt\_clusters - ovirt\_clusters\_facts - ovirt\_datacenters - ovirt\_datacenters\_facts - ovirt\_external\_providers - ovirt\_external\_providers\_facts - ovirt\_groups - ovirt\_groups\_facts - ovirt\_host\_networks - ovirt\_host\_pm - ovirt\_hosts - ovirt\_hosts\_facts - ovirt\_mac\_pools - ovirt\_networks - ovirt\_networks\_facts - ovirt\_nics - ovirt\_nics\_facts - ovirt\_permissions - ovirt\_permissions\_facts - ovirt\_quotas - ovirt\_quotas\_facts - ovirt\_snapshots - ovirt\_snapshots\_facts - ovirt\_storage\_domains - ovirt\_storage\_domains\_facts - ovirt\_tags - ovirt\_tags\_facts - ovirt\_templates - ovirt\_templates\_facts - ovirt\_users - ovirt\_users\_facts - ovirt\_vmpools - ovirt\_vmpools\_facts - ovirt\_vms\_facts - pacemaker\_cluster - packet: - packet\_device - packet\_sshkey - pamd - panos: - panos\_address - panos\_admin - panos\_admpwd - panos\_cert\_gen\_ssh - panos\_check - panos\_commit - panos\_dag - panos\_import - panos\_interface - panos\_lic - panos\_loadcfg - panos\_mgtconfig - panos\_nat\_policy - panos\_pg - panos\_restart - panos\_security\_policy - panos\_service - postgresql\_schema - proxmox\_kvm - proxysql: - proxysql\_backend\_servers - proxysql\_global\_variables - proxysql\_manage\_config - proxysql\_mysql\_users - proxysql\_query\_rules - proxysql\_replication\_hostgroups - proxysql\_scheduler - pubnub\_blocks - pulp\_repo - runit - serverless - set\_stats - smartos: - imgadm - vmadm - sorcery - stacki\_host - swupd - tempfile - tower: - tower\_credential - tower\_group - tower\_host - tower\_inventory - tower\_job\_template - tower\_label - tower\_organization - tower\_project - tower\_role - tower\_team
- tower\_user - vmware: - vmware\_guest\_facts - vmware\_guest\_snapshot - web\_infrastructure: - jenkins\_script - system - parted - windows: - win\_disk\_image - win\_dns\_client - win\_domain - win\_domain\_controller - win\_domain\_membership - win\_find - win\_msg - win\_path - win\_psexec - win\_reg\_stat - win\_region - win\_say - win\_shortcut - win\_tempfile - xbps - zfs: - zfs\_facts - zpool\_facts ansible-2.5.1/changelogs/CHANGELOG-v2.4.rst0000644000000000000000000015775513265756155017747 0ustar rootroot00000000000000======================================== Ansible 2.4 "Dancing Days" Release Notes ======================================== 2.4.4 "Dancing Days" - TBD -------------------------- Bugfixes ~~~~~~~~ - Fix python 3 dictionary runtime error in ios\_config and eos\_config (https://github.com/ansible/ansible/issues/36717) - Fix ``win_script`` to work with large arguments and removed an unneeded function that produced errors (https://github.com/ansible/ansible/pull/33855) - Fix timeout when using piped ssh transfer with become https://github.com/ansible/ansible/issues/34523 - Fix win\_scheduled\_task docs to correctly reflect what is required and when (https://github.com/ansible/ansible/issues/35072) - Updated copy test to create recursive symlink during the test and not have it located in the git repo (https://github.com/ansible/ansible/pull/35073) - Fix Digital Ocean tags data type due to backend API changes no longer accepting integers (https://github.com/ansible/ansible/pull/33486) - Fix for nxos\_vxlan\_vtep\_vni issues: https://github.com/ansible/ansible/pull/34946 - Fixes for nxos\_bgp: https://github.com/ansible/ansible/pull/34590 - Enable nxapi nxos\_banner test: https://github.com/ansible/ansible/pull/35033 - fix vxlan idempotent issue in nxos\_vxlan\_vtep: https://github.com/ansible/ansible/pull/34750 - Fix win\_dns\_client to allow setting dynamic IP from static IP (https://github.com/ansible/ansible/pull/35149) - Fix azure\_rm\_subnet absent idempotency issues (https://github.com/ansible/ansible/pull/35037) - Fix azure\_rm\_virtualmachine creating VM with vnet in another resource group (https://github.com/ansible/ansible/pull/35038) - Fix nxos terminal plugin regex to support certain commands (https://github.com/ansible/ansible/pull/35186) - Fix network os\_config modules backward diff (https://github.com/ansible/ansible/pull/35332) - Fix nxos\_snmp\_user removing encryption from user on subsequent runs of the task (https://github.com/ansible/ansible/pull/35433) - Fix traceback in winrm module when the ipaddress module is not installed https://github.com/ansible/ansible/pull/35723/files - Fix bug in ``lineinfile`` where the line would not be inserted when using ``insertbefore`` or ``insertafter`` if the pattern occurred anywhere in the file (https://github.com/ansible/ansible/issues/28721).
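With the fix in place, a task like the following (path, line, and pattern are hypothetical) inserts the line after the first match of the pattern instead of being silently skipped::

    - lineinfile:
        path: /etc/ssh/sshd_config
        line: 'PermitRootLogin no'
        insertafter: '^#PermitRootLogin'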
- Fix connection local getting overridden by network\_cli for transport nxapi,eapi for platform agnostic modules (https://github.com/ansible/ansible/pull/35590) - Include dest in the results from file copy: https://github.com/ansible/ansible/pull/35702/ - Fix eos\_config second-level indent idempotence https://github.com/ansible/ansible/pull/35588 - Fix the removed\_in\_version to 2.6 for the ios\_config force option https://github.com/ansible/ansible/pull/35853 - Fix memory ballooning caused as a result of task caching changes https://github.com/ansible/ansible/pull/35921 - Fix nxos\_igmp\_interface for diff nxos versions (https://github.com/ansible/ansible/pull/35959) - Fix recursion error with many flat includes (https://github.com/ansible/ansible/pull/36075) - Fix win\_uri to work with ``creates`` and ``removes`` options (https://github.com/ansible/ansible/pull/36016) - Fix the oom\_killer parameter to docker\_container not being honored https://github.com/ansible/ansible/pull/34130 - Fix docker\_service so a build is not triggered every time https://github.com/ansible/ansible/issues/36145 - Be more tolerant about spaces when gathering virtual facts (https://github.com/ansible/ansible/pull/36042) - validate add\_host name parameter (https://github.com/ansible/ansible/pull/36055) - spelling fixes (https://github.com/ansible/ansible/pull/36007) - avoid needless vault prompt on ansible-console (https://github.com/ansible/ansible/pull/36244) - fix callback function signatures (https://github.com/ansible/ansible/pull/35664) - Clarify error message from convert\_bool() https://github.com/ansible/ansible/pull/36041 - Fix EC2 C5 instance\_type fact to be kvm: https://github.com/ansible/ansible/pull/35063 - Fix templating of loop\_control properties: https://github.com/ansible/ansible/pull/36124 - Fix dependency in the deb package on Ubuntu-12.04: https://github.com/ansible/ansible/pull/36407 - Fix WinRM Python 3 encoding when getting Kerberos ticket (https://github.com/ansible/ansible/issues/36255) - Always show custom prompt in pause module (https://github.com/ansible/ansible/issues/36057) - Improve performance and recursion depth in include\_role (https://github.com/ansible/ansible/pull/36470) - Fix using ansible\_\*\_interpreter on Python3 with non-new-style modules (old-style ansible python modules, modules written in another language, etc) https://github.com/ansible/ansible/pull/36541 - Fix vyos\_config IndexError in sanitize\_config (https://github.com/ansible/ansible/issues/36351) - Fix vyos\_l3\_interface multiple address assignment to interfaces (https://github.com/ansible/ansible/pull/36721) - Protect from inventory plugins using verify incorrectly https://github.com/ansible/ansible/pull/36591 - loop control templating https://github.com/ansible/ansible/pull/36124 - fix debug output https://github.com/ansible/ansible/pull/36307 - Fix credentials for Ansible Tower modules to work with v1 and v2 of the API (https://github.com/ansible/ansible/pull/36587) (https://github.com/ansible/ansible/pull/36662) - Python3 fixes: - Fix for the znode zookeeper module: https://github.com/ansible/ansible/pull/36999 - Fix for the maven\_artifact module: https://github.com/ansible/ansible/pull/37035 - Add changes to get docker\_container, docker\_common, and docker\_network working with Docker SDK 3.x: https://github.com/ansible/ansible/pull/36973 - Ensure we install ansible-config and ansible-inventory with ``pip install -e``
(https://github.com/ansible/ansible/pull/37151) - Fix for unarchive when users use the --strip-components extra\_opt to tar, causing ansible to set permissions on the wrong directory. https://github.com/ansible/ansible/pull/37048 - Fix powershell plugin to handle special chars in environment keys as well as int and bool values (https://github.com/ansible/ansible/pull/37215) - Fix error messages to not be inappropriately templated: https://github.com/ansible/ansible/pull/37329 - Fix Python 3 error in the openssl\_certificate module: https://github.com/ansible/ansible/pull/35143 - Fix traceback when creating or stopping ovirt vms (https://github.com/ansible/ansible/pull/37249) - Connection error messages may contain characters that jinja2 would interpret as a template. Wrap the error string so this doesn't happen (https://github.com/ansible/ansible/pull/37329) 2.4.3 "Dancing Days" - 2018-01-31 --------------------------------- Bugfixes ~~~~~~~~ - Fix ``pamd`` rule args regexp to match file paths (https://github.com/ansible/ansible/pull/33432) - Check if SELinux policy exists before setting (https://github.com/ansible/ansible/pull/31834) - Set locale to ``C`` in ``letsencrypt`` module to fix date parsing errors (https://github.com/ansible/ansible/pull/31339) - Fix include in loop when strategy=free (https://github.com/ansible/ansible/pull/33094) - Fix save parameter in asa\_config (https://github.com/ansible/ansible/pull/32761) - Fix --vault-id support in ansible-pull (https://github.com/ansible/ansible/pull/33629) - In nxos\_interface\_ospf, fail nicely if loopback is used with passive\_interface: (https://github.com/ansible/ansible/pull/33252) - Fix quote filter when given an integer to quote (https://github.com/ansible/ansible/issues/33272) - nxos\_vrf\_interface fix when validating the interface (https://github.com/ansible/ansible/issues/33227) - Fix for win\_copy when sourcing files from an SMBv1 share (https://github.com/ansible/ansible/pull/33576) - correctly report callback plugin file - restrict revaulting to vault cli https://github.com/ansible/ansible/pull/33656 - Fix python3 tracebacks in letsencrypt module (https://github.com/ansible/ansible/pull/32734) - Fix ansible\_\*\_interpreter variables to be templated prior to being used: https://github.com/ansible/ansible/pull/33698 - Fix setting of environment in a task that uses a loop: https://github.com/ansible/ansible/issues/32685 - Fix fetch on Windows failing to fetch files of a particular block size (https://github.com/ansible/ansible/pull/33697) - preserve certain fields during no log.
https://github.com/ansible/ansible/pull/33637 - fix issue with order of declaration of sections in ini inventory https://github.com/ansible/ansible/pull/33781 - Fix win\_iis\_webapppool to correctly stop an apppool (https://github.com/ansible/ansible/pull/33777) - Fix CloudEngine host failed (https://github.com/ansible/ansible/pull/27876) - Fix ios\_config save issue (https://github.com/ansible/ansible/pull/33791) - Handle vault filenames with nonascii chars when displaying messages (https://github.com/ansible/ansible/pull/33926) - Fix win\_iis\_webapppool to not return passwords (https://github.com/ansible/ansible/pull/33931) - Fix extended file attributes detection and changing: (https://github.com/ansible/ansible/pull/18731) - correctly ensure 'ungrouped' membership rules (https://github.com/ansible/ansible/pull/33878) - made warnings less noisy when empty/no inventory is supplied (https://github.com/ansible/ansible/pull/32806) - Fixes a failure which prevented creating servers in the cloudscale\_server module - Fix win\_firewall\_rule "Specified cast is invalid" error when modifying a rule with all of Domain/Public/Private profiles set (https://github.com/ansible/ansible/pull/34383) - Fix case for multilib when installing from a file in the yum module (https://github.com/ansible/ansible/pull/32236) - Fix WinRM parsing/escaping of IPv6 addresses (https://github.com/ansible/ansible/pull/34072) - Fix win\_package to detect MSI regardless of the extension case (https://github.com/ansible/ansible/issues/34465) - Updated win\_mapped\_drive docs to clarify what it is used for (https://github.com/ansible/ansible/pull/34478) - Fix file-related modules run in check\_mode when the file being operated on does not exist (https://github.com/ansible/ansible/pull/33967) - Make eos\_vlan idempotent (https://github.com/ansible/ansible/pull/34443) - Fix win\_iis\_website to properly check attributes before setting (https://github.com/ansible/ansible/pull/34501) - Fixed the removal date for ios\_config save and force parameters (https://github.com/ansible/ansible/pull/33885) - cloudstack: fix timeout from ini config file being ignored https://github.com/ansible/ansible/pull/34854 - fixes memory usage issues with many blocks/includes https://github.com/ansible/ansible/issues/31673 https://github.com/ansible/ansible/pull/34461 - Fixes maximum recursion depth exceeded with include\_role https://github.com/ansible/ansible/issues/23609 - Fix to win\_dns\_client module to take ordering of DNS servers to resolve into account: https://github.com/ansible/ansible/pull/34656 - Fix for the nxos\_banner module where some nxos images nest the output inside of an additional dict: https://github.com/ansible/ansible/pull/34695 - Fix failure message "got multiple values for keyword argument id" in the azure\_rm\_securitygroup module (caused by changes to the azure python API): https://github.com/ansible/ansible/pull/34810 - Bump Azure storage client minimum to 1.5.0 to fix deserialization issues. This will break Azure Stack until it receives storage API version 2017-10-01 or changes are made to support multiple versions. (https://github.com/ansible/ansible/pull/34442) - Flush stdin when passing the become password. Fixes some cases of timeout on Python 3 with the ssh connection plugin: https://github.com/ansible/ansible/pull/35049 2.4.2 "Dancing Days" - 2017-11-29 --------------------------------- Bugfixes ~~~~~~~~ - Fix formatting typo in panos\_security\_rule.py docs.
(https://github.com/ansible/ansible/commit/c0fc797a06451d2fe1ac4fc077fc64f3a1666447) - Fix rpm spec file to build on RHEL6 without EPEL packages (https://github.com/ansible/ansible/pull/31653) - Keep hosts in play vars if inside of a rescue task (https://github.com/ansible/ansible/pull/31710) - Fix wait\_for module to treat broken connections as unready so that the connection continues to be retried: https://github.com/ansible/ansible/pull/28839 - Python3 fixes: - windows\_azure, clc\_firewall\_policy, and ce\_template modules fixed for imports of urllib, which changed between Python2 and Python3; the consul\_kv.py lookup plugin was fixed for the same urllib imports (https://github.com/ansible/ansible/issues/31240) - Make internal hashing of hostvars use bytes on both python2 and python3 (https://github.com/ansible/ansible/pull/31788) - Fix logging inside of KubernetesAnsibleModule() to not use self.helper.logging. The Ansible builtin log() method will strip out parameters marked no\_log and will not log if no\_log was set in the playbook; self.helper.log() circumvents that (https://github.com/ansible/ansible/pull/31789) - Correct task results display so that it more closely matches what was present in 2.3.x and previous. - Warn when a group has a bad key (should be one of vars, children, or hosts) https://github.com/ansible/ansible/pull/31495 - Use controller-configured ansible\_shell\_executable to run commands in the module (https://github.com/ansible/ansible/pull/31361) - Add documentation about writing unittests for Ansible - Fix bugs in get\_url/uri's SNI and TLS version handling when used on systems that have Python-2.7.9+ and urllib3 installed. - Have ansible-pull process inventory in its own way. Fixes issues with ansible-pull not using the correct inventory, especially for localhost (https://github.com/ansible/ansible/pull/32135) - Fix for implicit localhost receiving too many variables from the all group (https://github.com/ansible/ansible/pull/31959) - Fix the service module to correctly detect which type of init system is present on the host.
(https://github.com/ansible/ansible/pull/32086) - Fix inventory patterns to convert to strings before processing: (https://github.com/ansible/ansible/issues/31978) - Fix traceback in firewalld module instead of a nice error message: (https://github.com/ansible/ansible/pull/31949) - Fix for entering privileged mode using eos network modules: (https://github.com/ansible/ansible/issues/30802) - Validate that the destination for ansible-pull is a valid directory: (https://github.com/ansible/ansible/pull/31499) - Document how to preserve strings of digits as strings in the ini inventory: (https://github.com/ansible/ansible/pull/32047) - Make sure we return ansible\_distribution\_major\_version for macOS: (https://github.com/ansible/ansible/pull/31708) - Fix to ansible-doc -l to list custom inventory plugins: (https://github.com/ansible/ansible/pull/31996) - Fix win\_chocolatey to respect case sensitivity in URLs: (https://github.com/ansible/ansible/pull/31983) - Fix config\_format json in the junos\_facts module: (https://github.com/ansible/ansible/pull/31818) - Allow the apt module's autoremove parameter to take effect in upgrades: (https://github.com/ansible/ansible/pull/30747) - When creating a new user via eos\_user, create the user before setting the user's privilege level: (https://github.com/ansible/ansible/pull/32162) - Fixes nxos\_portchannel idempotence failure on N1 images: (https://github.com/ansible/ansible/pull/31057) - Remove provider from prepare\_ios\_tests integration test: (https://github.com/ansible/ansible/pull/31038) - Fix nxos\_acl change ports to non well known ports and drop time\_range for N1: (https://github.com/ansible/ansible/pull/31261) - Fix nxos\_banner removal idempotence issue in N1 images: (https://github.com/ansible/ansible/pull/31259) - Return error message back to the module (https://github.com/ansible/ansible/pull/31035) - Fix nxos\_igmp\_snooping idempotence: (https://github.com/ansible/ansible/pull/31688) - NXOS integration test nxos\_file\_copy, nxos\_igmp, nxos\_igmp\_interface, nxos\_igmp\_snooping, nxos\_ntp\_auth, nxos\_ntp\_options: (https://github.com/ansible/ansible/pull/29030) - Fix elb\_target\_group module traceback when ports were specified inside of the targets parameter: (https://github.com/ansible/ansible/pull/32202) - Fix creation of empty virtual directories in aws\_s3 module: (https://github.com/ansible/ansible/pull/32169) - Enable echo for ``pause`` module: (https://github.com/ansible/ansible/issues/14160) - Fix for ``hashi_vault`` lookup to return all keys at a given path when no key is specified (https://github.com/ansible/ansible/pull/32182) - Fix for ``win_package`` to allow TLS 1.1 and 1.2 on web requests: (https://github.com/ansible/ansible/pull/32184) - Remove provider from ios integration test: (https://github.com/ansible/ansible/pull/31037) - Fix eos\_user tests (https://github.com/ansible/ansible/pull/32261) - Fix ansible-galaxy --force with installed roles: (https://github.com/ansible/ansible/pull/32282) - ios\_interface testfix: (https://github.com/ansible/ansible/pull/32335) - Fix ios integration tests: (https://github.com/ansible/ansible/pull/32342) - Ensure there is always a basedir so we always pick up group/host\_vars https://github.com/ansible/ansible/pull/32269 - Fix vars placement in ansible-inventory https://github.com/ansible/ansible/pull/32276 - Correct options for luseradd in user module https://github.com/ansible/ansible/pull/32262 - Clarified package docs on 'latest' state (see the example below) https://github.com/ansible/ansible/pull/32397
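As a small sketch of the documented behaviour (package name hypothetical), ``state: latest`` installs the package if absent and also upgrades it to the newest available version, whereas ``state: present`` only installs::

    - package:
        name: nginx
        state: latest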
- Fix issue with user module when local is true (https://github.com/ansible/ansible/pull/32262 and https://github.com/ansible/ansible/pull/32411) - Fix for max\_fail\_percentage being inaccurate: (https://github.com/ansible/ansible/issues/32255) - Fix check mode when deleting ACS instance in azure\_rm\_acs module: (https://github.com/ansible/ansible/pull/32063) - Fix ios\_logging smaller issues and make default size for buffered work: (https://github.com/ansible/ansible/pull/32321) - Fix ios\_logging module issue where facility is being deleted along with host: (https://github.com/ansible/ansible/pull/32234) - Fix wrong prompt issue for network modules (https://github.com/ansible/ansible/pull/32426) - Fix eos\_eapi to enable non-default vrfs if the default vrf is already configured (https://github.com/ansible/ansible/pull/32112) - Fix network parse\_cli filter in case a single match is not caught when using start\_block and end\_block (https://github.com/ansible/ansible/pull/31092) - Fix win\_find failing on files it can't access, change behaviour to be more like the find module (https://github.com/ansible/ansible/issues/31898) - Amended tracking of 'changed' https://github.com/ansible/ansible/pull/31812 - Fix label assignment in ovirt\_host\_networks (https://github.com/ansible/ansible/pull/31973) - Fix fencing and kuma usage in ovirt\_cluster module (https://github.com/ansible/ansible/pull/32190) - Fix failure during upgrade due to NON\_RESPONSIVE state for ovirt\_hosts module (https://github.com/ansible/ansible/pull/32192) - ini inventory format now correctly handles group creation without needing a specific order https://github.com/ansible/ansible/pull/32471 - Fix for quoted paths in win\_service (https://github.com/ansible/ansible/issues/32368) - Fix tracebacks for non-ascii paths when parsing inventory (https://github.com/ansible/ansible/pull/32511) - Fix git archive when update is set to no (https://github.com/ansible/ansible/pull/31829) - Fix locale when screen scraping in the yum module (https://github.com/ansible/ansible/pull/32203) - Fix for validating proxy results on Python3 for modules making http requests: (https://github.com/ansible/ansible/pull/32596) - Fix unreferenced variable in SNS topic module (https://github.com/ansible/ansible/pull/29117) - Handle ignore\_errors in loops (https://github.com/ansible/ansible/pull/32546) - Fix running with closed stdin on python 3 (https://github.com/ansible/ansible/pull/31695) - Fix undefined variable in script inventory plugin (https://github.com/ansible/ansible/pull/31381) - Fix win\_copy on Python 2.x to support files greater than 4GB (https://github.com/ansible/ansible/pull/32682) - Add extra error handling for vmware connect to correctly detect scenarios where the username does not have the required logon permissions (https://github.com/ansible/ansible/pull/32613) - Fix ios\_config file prompt issue while using save\_when (https://github.com/ansible/ansible/pull/32744) - Prevent host\_group\_vars plugin load errors when using 'path as inventory hostname' https://github.com/ansible/ansible/issues/32764 - Better errors when loading malformed vault envelopes (https://github.com/ansible/ansible/issues/28038) - nxos\_interface error handling (https://github.com/ansible/ansible/pull/32846) - Fix snmp bugs on Nexus 3500 platform (https://github.com/ansible/ansible/pull/32773) - nxos\_config and nxos\_facts - fixes for N35 platform (https://github.com/ansible/ansible/pull/32762) - fix dci failure
for nxos (https://github.com/ansible/ansible/pull/32877) - Do not execute ``script`` tasks in check mode (https://github.com/ansible/ansible/issues/30676) - Keep newlines when reading LXC container config file (https://github.com/ansible/ansible/pull/32219) - Fix a traceback in os\_floating\_ip when the required instance is already present in the cloud: https://github.com/ansible/ansible/pull/32887 - Fix for modifying existing application load balancers using certificates (https://github.com/ansible/ansible/pull/28217) - Fix --ask-vault-pass with no tty and password from stdin (https://github.com/ansible/ansible/issues/30993) - Fix for IIS windows modules to use hashtables instead of PSCustomObject (https://github.com/ansible/ansible/pull/32710) - Fix nxos\_snmp\_host bug (https://github.com/ansible/ansible/pull/32916) - Make IOS devices consistent ios\_logging (https://github.com/ansible/ansible/pull/33100) - restore error on orphan group:vars declaration for ini inventories https://github.com/ansible/ansible/pull/32866 - restore host/group\_vars merge order https://github.com/ansible/ansible/pull/32963 - use correct loop var when delegating https://github.com/ansible/ansible/pull/32986 - Handle sets and datetime objects in inventory sources, fixing tracebacks https://github.com/ansible/ansible/pull/32990 - Fix for breaking change to Azure Python SDK DNS RecordSet constructor in azure-mgmt-dns==1.2.0 https://github.com/ansible/ansible/pull/33165 - Fix for breaking change to Azure Python SDK that prevented some members from being returned in facts modules https://github.com/ansible/ansible/pull/33169 - restored glob/regex host pattern matching to traverse groups and hosts and not return after first found https://github.com/ansible/ansible/pull/33158 - change nxos\_interface module to use "show interface" to support more platforms https://github.com/ansible/ansible/pull/33037 2.4.1 "Dancing Days" - 2017-10-25 --------------------------------- Bugfixes ~~~~~~~~ - Security fix for CVE-2017-7550: the jenkins\_plugin module was logging the jenkins server password if the url\_password was passed via the params field: https://github.com/ansible/ansible/pull/30875 - Update openssl\* module documentation to show openssl-0.16 is the minimum version - Fix openssl\_certificate's csr handling - Python-3 fixes - Fix openssl\_certificate parameter assertion on Python3 - Fix for python3 and nonascii strings in inventory plugins (https://github.com/ansible/ansible/pull/30666) - Fix missing urllib in iam\_policy - Fix crypttab module for bytes<=>text string mismatch ( https://github.com/ansible/ansible/pull/30457 ) - Fix lxc\_container module combining bytes with text ( https://github.com/ansible/ansible/pull/30572 ) - Fix map not returning a list on python3 in the ec2\_snapshot\_facts module (https://github.com/ansible/ansible/pull/30606) - Fix uri (and other url retrieving) modules when used with a proxy. (https://github.com/ansible/ansible/issues/31109) - Fix azure\_rm dynamic inventory script ConfigParser usage. - Fix for win\_file to respect check mode when deleting directories - Fix for Ansible.ModuleUtils.Legacy.psm1 to return list params correctly - Fix for a proper logout in the module ovirt\_vms - Fixed docs for 'password' lookup - Corrected and added missing feature and porting docs for 2.4 - Fix for Ansible.ModuleUtils.CamelConversion to handle empty lists and lists with one entry - Fix nxos terminal regex to parse username correctly.
- Fix colors for selective callback - Fix for 'New password' prompt on 'ansible-vault edit' (https://github.com/ansible/ansible/issues/30491) - Fix for 'ansible-vault encrypt' with vault\_password\_file in config and --ask-vault-pass cli (https://github.com/ansible/ansible/pull/30514#pullrequestreview-63395903) - updated porting guide with notes for callbacks and config - Added backwards compatibility shim for callbacks that do not inherit from CallbackBase - Corrected issue with configuration and multiple ini entries being overwritten even when not set. - backported fix for doc generation (plugin\_formatter) - Fix ec2\_lc module for an unknown parameter name (https://github.com/ansible/ansible/pull/30573) - Change configuration of defaults to use standard jinja2 instead of custom eval() for using variables in the default field of config (https://github.com/ansible/ansible/pull/30650) - added missing entry in chlog deprecation - Fixed precedence and values for become flags and executable settings - Fix for win\_domain\_membership to throw more helpful error messages and check/fix when calling WMI function after changing workgroup - Fix for win\_power\_plan to compare the OS versions correctly and work on Windows 10/Server 2016 - Fix module doc for typo in telnet command option - Fix OpenBSD pkg\_mgr fact (https://github.com/ansible/ansible/issues/30623) - Fix encoding error when there are nonascii values in the path to the ssh binary - removed YAML inventory group name validation; it broke existing setups, should be global in any case, and should be configurable - performance improvement for inventory, which had slowed down considerably since 2.3 - Fix cpu facts on sparc64 (https://github.com/ansible/ansible/pull/30261) - Fix ansible\_distribution fact for Arch linux (https://github.com/ansible/ansible/issues/30600) - remove print statements from play\_context/become - Fix vault errors after 'ansible-vault edit' (https://github.com/ansible/ansible/issues/30575) - updated api doc example to match api changes - corrected issues with slack callback plugin - it is import\_playbook, not import\_plays, docs now reflect this - fixed typo and missed include/import conversion in import\_tasks docs - updated porting docs with note about inventory\_dir - removed extension requirement for yaml inventory plugin to restore previous behaviour - fixed ansible-pull to now correctly deal with inventory - corrected dig lookup docs - fix type handling for sensu\_silence so the module works - added fix for win\_iis\_webapppool to correctly handle array elements - Fix bugs caused by lack of collector ordering like service\_mgr being incorrect (https://github.com/ansible/ansible/issues/30753) - Fix os\_image when the id parameter is not set in the task.
( https://github.com/ansible/ansible/pull/29147 ) - Fix for the winrm connection to use proper task vars - removed typo from dig lookup docs - Updated win\_chocolatey example to be clearer around what should be used with become - Fix for copy module when permissions are changed but the file contents are not ( https://github.com/ansible/ansible/issues/30556 ) - corrected YAML\_FILENAME\_EXTENSIONS ini setter as key/section were swapped - Better error message when a yaml inventory is invalid - avoid include\_Xs conflating vars with options - Fix aws\_s3 module handling ``encrypt`` option (https://github.com/ansible/ansible/pull/31203) - Fix for win\_msg to document and show error when message is greater than 255 characters - Fix for win\_dotnet\_ngen to work after recent regression - fixed backwards compat method for config - removed docs for prematurely added ssh specific pipelining settings - fixed redis cache typo - Fix AttributeError during inventory group deserialization (https://github.com/ansible/ansible/issues/30903) - Fix 'ansible-vault encrypt --output=-' (https://github.com/ansible/ansible/issues/30550) - restore pre 2.4 pipeline configuration options (env and ini) - Fix win\_copy regression: handling of vault-encrypted source files (https://github.com/ansible/ansible/pull/31084) - Updated return values for win\_reg\_stat to correctly show what is being returned (https://github.com/ansible/ansible/pull/31252) - reduced normal error redundancy and verbosity; full display on increased verbosity and when needed - Give an informative error instead of a traceback if include\_vars dir is a file instead of a directory (https://github.com/ansible/ansible/pull/31157) - Fix monit module's version check for color support (https://github.com/ansible/ansible/pull/31212) - Make ``elasticsearch_plugin`` module work with both 2.x and 5.x (https://github.com/ansible/ansible/issues/21989) - Fix for become on Windows to handle ignored errors (https://github.com/ansible/ansible/issues/30468) - Fix removal of newlines when writing SELinux config (https://github.com/ansible/ansible/issues/30618) - clarified extension requirement for constructed inv plugin - really turn off inventory caching, toggle will be added in 2.5 - for inventory sources, don't follow symlinks to calculate base directory, used for group/host\_vars - Port the uptime.py example script to the new inventory API. - inventory\_file variable again returns full path, not just basename - added info about cwd group/host vars to porting guide - Fix name parsing out of envra in the yum module - give user friendly error on badly formatted yaml inventory source - Fix any\_errors\_fatal setting in playbooks (see the short example play below). - Fix setting of ssh-extra-args from the cli (https://github.com/ansible/ansible/pull/31326) - Change SELinux fact behavior to always return a dictionary. (https://github.com/ansible/ansible/issues/18692) - Revert a fix for using non /bin/sh shells for modules' running commands as this was causing output from commands to change, thus breaking playbooks. See the original bug for details and links to the eventual fix: https://github.com/ansible/ansible/issues/24169
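A short sketch of the repaired keyword (group and command are hypothetical); with ``any_errors_fatal: true``, a failure on any single host ends the play for all hosts::

    - hosts: webservers
      any_errors_fatal: true
      tasks:
        - command: /usr/local/bin/deploy.sh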
- Do not log data field in ``docker_secrets`` module (https://github.com/ansible/ansible/pull/31366) - Fix rpm\_key taking the wrong 8 chars from the keyid (https://github.com/ansible/ansible/pull/31045) - chown errors now more informative - Fix for win\_copy to copy a source file that has invalid windows characters in the filename; the dest must still have valid windows characters (https://github.com/ansible/ansible/issues/31336#issuecomment-334649927) - Fix systemd module to not run daemon-reload in check mode. - fixed some parsing and selection issues with inventory manager, fixed minor bugs in yaml and constructed plugins - Fix the ping module documentation to reference win\_ping instead of itself: https://github.com/ansible/ansible/pull/31444 - Fix for ec2\_win\_password to allow blank key\_passphrase again (https://github.com/ansible/ansible/pull/28791) - added toggle for vars\_plugin behaviour to execute relative to playbook, set default to revert to previous way. - Fix for win\_copy to not remove destination file on change when in check mode (https://github.com/ansible/ansible/pull/31469) - Fix include\_role usage of role\_name (https://github.com/ansible/ansible/pull/31463) - Fix service and package forcing a second run of the setup module to function (https://github.com/ansible/ansible/issues/31485) - Better error message when attempting to use include or import with /usr/bin/ansible (https://github.com/ansible/ansible/pull/31492/) - Fix ``sysctl`` module to remove entries when ``state=absent`` (https://github.com/ansible/ansible/issues/29920) - Fix for ec2\_group to avoid trying to iterate over None (https://github.com/ansible/ansible/pull/31531) - Fix for ec2\_group for a possible KeyError bug (https://github.com/ansible/ansible/pull/31540) - Fix for the rpm\_key module when importing the first gpg key on a system (https://github.com/ansible/ansible/pull/31514) - Fix for aws\_s3 metadata to use the correct parameters when uploading a file (https://github.com/ansible/ansible/issues/31232) - Fix for the yum module when installing from file/url crashes (https://github.com/ansible/ansible/pull/31529) - Improved error messaging for Windows become/runas when username is bogus (https://github.com/ansible/ansible/pull/31551) - Fix rollback feature in junos\_config to now allow configuration rollback on device (https://github.com/ansible/ansible/pull/31424) - Remove command executed log from ansible-connection (https://github.com/ansible/ansible/pull/31581) - Fix relative paths to be relative to config file when there is no playbook available (https://github.com/ansible/ansible/issues/31533) - Fix Inventory plugins to use the configured inventory plugin path (https://github.com/ansible/ansible/issues/31605) - Fix include task to be dynamic (https://github.com/ansible/ansible/issues/31593) - A couple fixes to the test process to account for new testing resources in our ci system and an upstream cryptography update that didn't work with pip-8.x - Document backup\_path in a few dellos modules and vyos\_config (https://github.com/ansible/ansible/issues/31844) - Fix for vmware\_vm\_facts with dangling inaccessible VM which don't have MAC addresses (https://github.com/ansible/ansible/pull/31629) - Fix for win\_regedit sending extra data that could confuse ansible's result parsing (https://github.com/ansible/ansible/pull/31813) - Fix git module to correctly clean up temporary dirs
(https://github.com/ansible/ansible/pull/31541) - Fix for modules which use atomic\_move() to rename files raising an exception if a file could not be opened. Fix will return a nice error message instead: https://github.com/ansible/ansible/issues/31786 - Fix ansible-doc and ansible-console module-path option (https://github.com/ansible/ansible/pull/31744) - Fix for hostname module on RHEL 7.5 (https://github.com/ansible/ansible/issues/31811) - Fix provider password leak in logs for asa modules (https://github.com/ansible/ansible/issues/32343) - Fix tagging for dynamodb\_table if region is not explicitly passed to the module (https://github.com/ansible/ansible/pull/32557) - Fix Python 3 decode error in ``cloudflare_dns`` (https://github.com/ansible/ansible/pull/32065) Known Bugs ~~~~~~~~~~ - Implicit localhost is getting ansible\_connection from all:vars instead of from the implicit localhost definition (https://github.com/ansible/ansible/issues/31420) 2.4 "Dancing Days" - 2017/09/18 ------------------------------- Major Changes ~~~~~~~~~~~~~ - Support for Python-2.4 and Python-2.5 on the managed system's side was dropped. If you need to manage a system that ships with Python-2.4 or Python-2.5, you'll need to install Python-2.6 or better on the managed system or run Ansible-2.3 until you can upgrade the system. - New import/include keywords to replace the old bare ``include`` directives. The use of ``static: {yes|no}`` on such includes is now deprecated. - The ``import_*`` (``import_playbook``, ``import_tasks``, ``import_role``) directives are static. - The ``include_*`` (``include_tasks``, ``include_role``) directives are dynamic. This is done to avoid collisions and possible security issues, as facts come from the remote targets and they might be compromised. - New ``order`` play level keyword that allows the user to change the order in which Ansible processes hosts when dispatching tasks. - Users can now set group merge priority for groups of the same depth (parent child relationship), using the new ``ansible_group_priority`` variable; when values are the same or don't exist it will fall back to the previous sorting by name (see the inventory sketch below). - Inventory has been revamped: - Inventory classes have been split to allow for better management and deduplication - Logic that each inventory source duplicated is now common and pushed up to reconciliation - VariableManager has been updated for better interaction with inventory - Updated CLI with helper method to initialize base objects for plays - New inventory plugins for creating inventory - Old inventory formats are still supported via plugins - Inline host\_list is also an inventory plugin, an example alternative ``advanced_host_list`` is also provided (it supports ranges) - New configuration option to list enabled plugins and precedence order ``[inventory]enable_plugins`` in ansible.cfg - vars\_plugins have been reworked, they are now run from Vars manager and API has changed (need docs) - Loading group\_vars/host\_vars is now a vars plugin and can be overridden - It is now possible to specify multiple inventory sources in the command line (-i /etc/hosts1 -i /opt/hosts2) - Inventory plugins can use the cache plugin (e.g. virtualbox) and are affected by ``meta: refresh_inventory``
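As a rough sketch of the group priority feature (group, host, and variable names are hypothetical), a YAML inventory might look like this; because ``east`` sets ``ansible_group_priority`` above the default of 1, its value of ``testvar`` is merged last and wins, instead of falling back to the alphabetical merge by group name::

    all:
      children:
        east:
          hosts:
            web1:
          vars:
            testvar: from_east
            ansible_group_priority: 10
        west:
          hosts:
            web1:
          vars:
            testvar: from_west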
- Group variable precedence is now configurable via the new 'precedence' option in ansible.cfg (needs docs) - Improved warnings and error messages across the board - Configuration has been changed from a hardcoded listing in the constants module to dynamically loaded from yaml definitions - Also added an ansible-config CLI to allow for listing config options and dumping current config (including origin) - TODO: build upon this to add many features detailed in ansible-config proposal https://github.com/ansible/proposals/issues/35 - Windows modules now support the use of multiple shared module\_utils files in the form of Powershell modules (.psm1), via ``#Requires -Module Ansible.ModuleUtils.Whatever.psm1`` - Python module argument\_spec now supports custom validation logic by accepting a callable as the ``type`` argument. - Windows become\_method: runas is no longer marked ``experimental`` - Windows become\_method: runas now works across all authtypes and will auto-elevate under UAC if the WinRM user has the "Act as part of the operating system" privilege - Do not escape backslashes in the template lookup plugin to mirror what the template module does https://github.com/ansible/ansible/issues/26397 Deprecations ~~~~~~~~~~~~ - The behaviour when specifying ``--tags`` (or ``--skip-tags``) multiple times on the command line has changed so that the tags are merged together by default. See the documentation for how to temporarily use the old behaviour if needed: https://docs.ansible.com/ansible/intro\_configuration.html#merge-multiple-cli-tags - The ``fetch`` module's ``validate_md5`` parameter has been deprecated and will be removed in 2.8. If you wish to disable post-validation of the downloaded file, use validate\_checksum instead. - Those using ansible as a library should note that the ``ansible.vars.unsafe_proxy`` module is deprecated and slated to go away in 2.8. The functionality has been moved to ``ansible.utils.unsafe_proxy`` to avoid a circular import. - The win\_get\_url module has the dictionary 'win\_get\_url' in its results deprecated; its content is now also available directly in the resulting output, like other modules. - Previously deprecated 'hostfile' config settings have been 're-deprecated'; previously the code did not warn about deprecated configuration settings, but it does now. Deprecated Modules (to be removed in 2.8): ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - azure: use M(azure\_rm\_virtualmachine) instead - cs\_nic: replaced by cs\_instance\_nic\_secondaryip, also see new module cs\_instance\_nic for managing nics - ec2\_facts: replaced by ec2\_metadata\_facts - ec2\_remote\_facts: replaced by ec2\_instance\_facts - panos\_address: use M(panos\_object) instead - panos\_nat\_policy: use M(panos\_nat\_rule) instead - panos\_security\_policy: use M(panos\_security\_rule) instead - panos\_service: use M(panos\_object) instead - s3: replaced by aws\_s3 - win\_msi: use M(win\_package) instead Removed Modules (previously deprecated): ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - eos\_template: use eos\_config instead - ios\_template: use ios\_config instead - iosxr\_template: use iosxr\_config instead - junos\_template: use junos\_config instead - nxos\_template: use nxos\_config instead - openswitch - ops\_template: use ops\_config instead Minor Changes ~~~~~~~~~~~~~ - Deprecated configuration options now issue warnings when set.
- Removed unused and deprecated config option ``pattern`` - Updated the copy of six bundled for modules to use from 1.4.1 to 1.10.0 - The ``inventory_dir`` var is not a global anymore; as we now allow multiple inventory sources, it is now host dependent. This means it cannot be used wherever host vars are not permitted, for example in task/handler names. - Fixed a cornercase with ini inventory vars. Previously, if an inventory var was a quoted string with hash marks ("#") in it then the parsed string included the quotes. Now the string will not be quoted. Previously, if the quoting ended before the string finished and then the hash mark appeared, the hash mark was included as part of the string. Now it is treated as a trailing comment: # Before: var1="string#comment" ===> var1: ""string#comment"" var1="string" #comment ===> var1: ""string" #comment" # After: var1="string#comment" ===> var1: "string#comment" var1="string" #comment ===> var1: "string" The new behaviour mirrors how the variables would appear if there was no hash mark in the string. - As of 2.4.0, the fetch module fails if there are errors reading the remote file. Use ``ignore_errors`` or ``failed_when`` in playbooks if you wish to ignore errors. - Experimentally added pmrun become method. - Enable the docker connection plugin to use su as a become method - Add an encoding parameter for the replace module so that it can operate on non-utf-8 files - By default, Ansible now uses the cryptography module to implement vault instead of the older pycrypto module. - Changed task state resulting from both ``rc`` and ``failed`` fields returned; 'rc' no longer overrides 'failed'. Test plugins have also been updated accordingly. - The win\_unzip module no longer includes dictionary 'win\_unzip' in its results, the content is now directly in the resulting output, like pretty much every other module. - Rewrite of the copy module so that it handles cornercases with symbolic links and empty directories. The copy module has a new parameter, ``local_follow``, which controls how links on the source system are treated. (The older parameter, follow, is for links on the remote system.) - Update the handling of symbolic file permissions in file-related mode parameters to deal with multiple operators. For instance, ``mode='u=rw+x-X'`` to set the execute bit on directories, remove it from files, and set read-write on both is now supported - Added better cookie parsing to fetch\_url/open\_url. Cookies are now in a dictionary named ``cookies`` in the fetch\_url result. Anything using ``open_url`` directly can pass a cookie object as a named arg (``cookies``), and then parse/format the cookies in the result. - The bundled copy of six in lib/ansible/module\_utils/six is now used unconditionally. The code to fall back on a system six interfered with static analysis of the code so the cost of using the fallback code became too high. Distributions which wish to unbundle may do so by replacing the bundled six in ansible/module\_utils/six/\_\_init\_\_.py. Six is tricky to unbundle, however, so they may want to base their efforts off the code we were using: https://github.com/ansible/ansible/blob/2fff690caab6a1c6a81973f704be3fbd0bde2c2f/lib/ansible/module\_utils/six/\_\_init\_\_.py - Update ipaddr Jinja filters to replace existing non-RFC-compliant ones, and added additional filters for easier handling of IP addresses (PR #26566).
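A minimal sketch of the extended ipaddr usage (addresses hypothetical; the ipaddr filters require the python netaddr library on the controller). The new suboptions are passed as the query argument::

    - debug:
        msg: "{{ '192.168.0.0/24' | ipaddr('first_usable') }}"   # e.g. 192.168.0.1

    - debug:
        msg: "{{ '192.168.0.0/24' | ipaddr('range_usable') }}"   # e.g. 192.168.0.1-192.168.0.254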
- datetime filter updated to use default format of datetime.datetime (ISO8601) - The junit plugin now has an option to report a junit test failure on changes for idempotent testing. - New 'diff' keyword allows setting diff mode on playbook objects, overriding command line option and config. - New config settings for inventory to: - control inventory plugins used - extensions of files to ignore when using inventory directory - patterns of files to ignore when using inventory directory - option to toggle failed inventory source parsing between an error or a warning - More fixes for Python 3 across the code base. - win\_shell and win\_command modules now properly preserve quoted arguments passed on the command-line. Tasks that attempted to work around the issue by adding extra quotes/escaping may need to be reworked. See https://github.com/ansible/ansible/issues/23019 for additional detail. - All configuration paths are now relative to the ``ansible.cfg`` file used. - By user request, a 'configuration macro' (``CWD``) is available to force configured paths to be relative to the current working directory. Please note that this is unsafe and not recommended. New Callbacks: ^^^^^^^^^^^^^^ - full\_skip - profile\_roles - stderr New Connection plugins: ^^^^^^^^^^^^^^^^^^^^^^^ - buildah - saltstack New Filters: ^^^^^^^^^^^^ - ipaddr filter gained several new suboptions - first\_usable - ip/prefix - ip\_netmask - last\_usable - next\_usable - network\_id - network/prefix - network\_netmask - network\_wildcard - previous\_usable - range\_usable - size\_usable - wildcard - next\_nth\_usable - network\_in\_network - network\_in\_usable - previous\_nth\_usable - parse\_cli - parse\_cli\_textfsm - strftime - urlsplit New Inventory Plugins: ^^^^^^^^^^^^^^^^^^^^^^ - advanced\_host\_list - constructed - host\_list - ini - openstack - script - virtualbox - yaml New Inventory scripts: ^^^^^^^^^^^^^^^^^^^^^^ - lxd New Lookups: ^^^^^^^^^^^^ - chef\_databag - cyberarkpassword - hiera New Tests: ^^^^^^^^^^ - any: true if any element is true - all: true if all elements are true Module Notes ~~~~~~~~~~~~ - By mistake, early versions of the elb\_classic\_lb, elb\_instance, and elb\_classic\_lb\_facts modules were released and marked as stableinterface. These are now marked as preview in 2.4.1 and their parameters and return values may change in 2.5.0. Part of this mistake included deprecating the ec2\_elb\_lb, ec2\_lb, and ec2\_elb\_facts modules prematurely. These modules won't be deprecated until the replacements above have a stableinterface and the erroneous deprecation has been fixed in 2.4.1. - The docker\_container module has gained a new option, ``working_dir``, which allows specifying the working directory for the command being run in the image. - The ec2\_win\_password module now requires the cryptography python module be installed to run - The stat module added a field, lnk\_target. When the file being stated is a symlink, lnk\_target will contain the target of the link. This differs from lnk\_source when the target is specified relative to the symlink. In this case, lnk\_target will remain relative while lnk\_source will be expanded to an absolute path. - The archive module has a new parameter exclude\_path which lists paths to exclude from the archive - The yum module has a new parameter security which limits state=latest to security updates - The template module gained a follow parameter to match with copy and file. Like those modules, template defaults this parameter to False.
Previously, template hardcoded this to true. - Added a new parameter to command module that lets users specify data to pipe into the command's stdin. - The azure\_rm modules now accept a ``cloud_environment`` arg to access regional and private clouds. - The azure\_rm modules and inventory script now require at least version 2.0.0 of the Azure Python SDK. New Modules ~~~~~~~~~~~ Cloud ^^^^^ - amazon - aws\_api\_gateway - aws\_direct\_connect\_connection - aws\_direct\_connect\_link\_aggregation\_group - aws\_s3 - aws\_s3\_bucket\_facts - aws\_waf\_facts - data\_pipeline - dynamodb\_ttl - ec2\_instance\_facts - ec2\_metadata\_facts - ec2\_vpc\_dhcp\_option\_facts - ec2\_vpc\_endpoint - ec2\_vpc\_endpoint\_facts - ec2\_vpc\_peering\_facts - ecs\_attribute - elb\_application\_lb - elb\_application\_lb\_facts - elb\_target\_group - elb\_target\_group\_facts - iam\_group - iam\_managed\_policy - lightsail - redshift\_facts - azure - azure\_rm\_acs - azure\_rm\_availabilityset - azure\_rm\_availabilityset\_facts - azure\_rm\_dnsrecordset - azure\_rm\_dnsrecordset\_facts - azure\_rm\_dnszone - azure\_rm\_dnszone\_facts - azure\_rm\_functionapp - azure\_rm\_functionapp\_facts - azure\_rm\_loadbalancer - azure\_rm\_loadbalancer\_facts - azure\_rm\_managed\_disk - azure\_rm\_managed\_disk\_facts - azure\_rm\_virtualmachine\_extension - azure\_rm\_virtualmachine\_scaleset - azure\_rm\_virtualmachine\_scaleset\_facts - atomic - atomic\_container - cloudstack - cs\_instance\_nic - cs\_instance\_nic\_secondaryip - cs\_network\_acl - cs\_network\_acl\_rule - cs\_storage\_pool - cs\_vpn\_gateway - digital\_ocean - digital\_ocean\_floating\_ip - docker - docker\_secret - docker\_volume - google - gce\_labels - gcp\_backend\_service - gcp\_forwarding\_rule - gcp\_healthcheck - gcp\_target\_proxy - gcp\_url\_map - misc - helm - ovirt - ovirt\_host\_storage\_facts - ovirt\_scheduling\_policies\_facts - ovirt\_storage\_connections - vmware - vcenter\_license - vmware\_guest\_find - vmware\_guest\_tools\_wait - vmware\_resource\_pool Commands ^^^^^^^^ - telnet Crypto ^^^^^^ - openssl\_certificate - openssl\_csr Files ^^^^^ - xml Identity ^^^^^^^^ - cyberark - cyberark\_authentication - cyberark\_user - ipa - ipa\_dnsrecord Monitoring ^^^^^^^^^^ - sensu\_client - sensu\_handler - sensu\_silence Network ^^^^^^^ - aci - aci\_aep - aci\_ap - aci\_bd - aci\_bd\_subnet - aci\_bd\_to\_l3out - aci\_contract - aci\_contract\_subject\_to\_filter - aci\_epg - aci\_epg\_monitoring\_policy - aci\_epg\_to\_contract - aci\_epg\_to\_domain - aci\_filter - aci\_filter\_entry - aci\_intf\_policy\_fc - aci\_intf\_policy\_l2 - aci\_intf\_policy\_lldp - aci\_intf\_policy\_mcp - aci\_intf\_policy\_port\_channel - aci\_intf\_policy\_port\_security - aci\_l3out\_route\_tag\_policy - aci\_rest - aci\_taboo\_contract - aci\_tenant - aci\_tenant\_action\_rule\_profile - aci\_tenant\_span\_dst\_group - aci\_vrf - aireos - aireos\_command - aireos\_config - aruba - aruba\_command - aruba\_config - avi - avi\_actiongroupconfig - avi\_alertconfig - avi\_alertemailconfig - avi\_alertscriptconfig - avi\_alertsyslogconfig - avi\_authprofile - avi\_backup - avi\_backupconfiguration - avi\_cloud - avi\_cloudconnectoruser - avi\_cloudproperties - avi\_cluster - avi\_controllerproperties - avi\_dnspolicy - avi\_gslb - avi\_gslbapplicationpersistenceprofile - avi\_gslbgeodbprofile - avi\_gslbhealthmonitor - avi\_gslbservice - avi\_hardwaresecuritymodulegroup - avi\_httppolicyset - avi\_ipaddrgroup - avi\_ipamdnsproviderprofile - 
avi\_microservicegroup - avi\_network - avi\_networksecuritypolicy - avi\_poolgroupdeploymentpolicy - avi\_prioritylabels - avi\_scheduler - avi\_seproperties - avi\_serverautoscalepolicy - avi\_serviceengine - avi\_serviceenginegroup - avi\_snmptrapprofile - avi\_stringgroup - avi\_trafficcloneprofile - avi\_useraccountprofile - avi\_vrfcontext - avi\_vsdatascriptset - avi\_vsvip - avi\_webhook - bigswitch - bcf\_switch - cloudengine - ce\_aaa\_server - ce\_aaa\_server\_host - ce\_acl - ce\_acl\_advance - ce\_acl\_interface - ce\_bfd\_global - ce\_bfd\_session - ce\_bfd\_view - ce\_bgp - ce\_bgp\_af - ce\_bgp\_neighbor - ce\_bgp\_neighbor\_af - ce\_config - ce\_dldp - ce\_dldp\_interface - ce\_eth\_trunk - ce\_evpn\_bd\_vni - ce\_evpn\_bgp - ce\_evpn\_bgp\_rr - ce\_evpn\_global - ce\_facts - ce\_file\_copy - ce\_info\_center\_debug - ce\_info\_center\_global - ce\_info\_center\_log - ce\_info\_center\_trap - ce\_interface - ce\_interface\_ospf - ce\_ip\_interface - ce\_link\_status - ce\_mlag\_config - ce\_mlag\_interface - ce\_mtu - ce\_netconf - ce\_netstream\_aging - ce\_netstream\_export - ce\_netstream\_global - ce\_netstream\_template - ce\_ntp - ce\_ntp\_auth - ce\_ospf - ce\_ospf\_vrf - ce\_reboot - ce\_rollback - ce\_sflow - ce\_snmp\_community - ce\_snmp\_contact - ce\_snmp\_location - ce\_snmp\_target\_host - ce\_snmp\_traps - ce\_snmp\_user - ce\_startup - ce\_static\_route - ce\_stp - ce\_switchport - ce\_vlan - ce\_vrf - ce\_vrf\_af - ce\_vrf\_interface - ce\_vrrp - ce\_vxlan\_arp - ce\_vxlan\_gateway - ce\_vxlan\_global - ce\_vxlan\_tunnel - ce\_vxlan\_vap - cloudvision - cv\_server\_provision - eos - eos\_logging - eos\_vlan - eos\_vrf - f5 - bigip\_command - bigip\_config - bigip\_configsync\_actions - bigip\_gtm\_pool - bigip\_iapp\_service - bigip\_iapp\_template - bigip\_monitor\_tcp\_echo - bigip\_monitor\_tcp\_half\_open - bigip\_provision - bigip\_qkview - bigip\_snmp - bigip\_snmp\_trap - bigip\_ucs - bigip\_user - bigip\_virtual\_address - fortios - fortios\_address - interface - net\_interface - net\_linkagg - net\_lldp\_interface - ios - ios\_interface - ios\_logging - ios\_static\_route - ios\_user - iosxr - iosxr\_banner - iosxr\_interface - iosxr\_logging - iosxr\_user - junos - junos\_banner - junos\_interface - junos\_l3\_interface - junos\_linkagg - junos\_lldp - junos\_lldp\_interface - junos\_logging - junos\_static\_route - junos\_system - junos\_vlan - junos\_vrf - layer2 - net\_l2\_interface - net\_vlan - layer3 - net\_l3\_interface - net\_vrf - netscaler - netscaler\_cs\_action - netscaler\_cs\_policy - netscaler\_cs\_vserver - netscaler\_gslb\_service - netscaler\_gslb\_site - netscaler\_gslb\_vserver - netscaler\_lb\_monitor - netscaler\_lb\_vserver - netscaler\_save\_config - netscaler\_server - netscaler\_service - netscaler\_servicegroup - netscaler\_ssl\_certkey - nuage - nuage\_vspk - nxos - nxos\_banner - nxos\_logging - panos - panos\_nat\_rule - panos\_object - panos\_security\_rule - protocol - net\_lldp - routing - net\_static\_route - system - net\_banner - net\_logging - net\_system - net\_user - vyos - vyos\_banner - vyos\_interface - vyos\_l3\_interface - vyos\_linkagg - vyos\_lldp - vyos\_lldp\_interface - vyos\_logging - vyos\_static\_route - vyos\_user Notification ^^^^^^^^^^^^ - bearychat - catapult - office\_365\_connector\_card Remote Management ^^^^^^^^^^^^^^^^^ - hpe - oneview\_fc\_network - imc - imc\_rest - manageiq - manageiq\_user Source Control ^^^^^^^^^^^^^^ - github\_deploy\_key - github\_issue Storage ^^^^^^^ - 
nuage\_vpsk - panos - panos\_sag - purestorage - purefa\_hg - purefa\_host - purefa\_pg - purefa\_snap - purefa\_volume System ^^^^^^ - aix\_lvol - awall - dconf - interfaces\_file Web Infrastructure ^^^^^^^^^^^^^^^^^^ - gunicorn - rundeck\_acl\_policy - rundeck\_project Windows ^^^^^^^ - win\_defrag - win\_domain\_group - win\_domain\_user - win\_dsc - win\_eventlog - win\_eventlog\_entry - win\_firewall - win\_group\_membership - win\_hotfix - win\_mapped\_drive - win\_pagefile - win\_power\_plan - win\_psmodule - win\_rabbitmq\_plugin - win\_route - win\_security\_policy - win\_toast - win\_user\_right - win\_wait\_for - win\_wakeonlan ansible-2.5.1/changelogs/CHANGELOG-v2.5.rst0000644000000000000000000010477413265756155017741 0ustar rootroot00000000000000=================================== Ansible 2.5 "Kashmir" Release Notes =================================== v2.5.1 ====== Release Summary --------------- | Release Date: 2018-04-18 | `Porting Guide `_ Minor Changes ------------- - Updated example in vcenter_license module. - Updated virtual machine facts with instanceUUID, which is unique for each VM irrespective of name and BIOS UUID. Bugfixes -------- - EOS cannot check configuration without the use of a config session (ANSIBLE_EOS_USE_SESSIONS=0). The fix is to throw an error when hitting this exception case; configs would neither be checked nor played on the eos device. - Adds handling for the exception raised when the user does not have the correct set of permissions/privileges to read virtual machine facts. - onyx_pfc_interface - Add support for changes in pfc output in onyx 3.6.6000 https://github.com/ansible/ansible/pull/37651 - Fix mlag summary json parsing for onyx version 3.6.6000 and above https://github.com/ansible/ansible/pull/38191 - Update documentation related to datacenter in vmware_guest_find module. Mark datacenter as optional. - Set default network type as 'dhcp' if the user has not specified any. - nmcli - change default value of autoconnect - azure_rm_image - Allow Azure images to be created with tags; bug was introduced in Ansible v2.5.0 - azure_rm_networkinterface - fixed examples in module documentation and added fix to allow an IP configuration with no public IP (https://github.com/ansible/ansible/pull/36824) - azure_rm_virtualmachine - removed docs note that says only marketplace images can be used; custom images were added in 2.5 - Improve keyed groups for complex inventory - Made separator configurable - Fixed some exception types - Better error messages - backup options doc change to reflect the backup directory location in case the playbook is run from a role - filters - Don't overwrite builtin jinja2 filters with tests (https://github.com/ansible/ansible/pull/37881) - edgeos_command - add action plugin to backup config (https://github.com/ansible/ansible/pull/37619) - eos_vlan - fixed eos_vlan not working when having more than 6 interfaces (https://github.com/ansible/ansible/pull/38347) - Various grafana_* modules - Port away from the deprecated b64encodestring function to the b64encode function instead.
(https://github.com/ansible/ansible/pull/38388) - include_role - Fix parameter templating (https://github.com/ansible/ansible/pull/36372) - include_vars - Call DataLoader.load with the correct signature to prevent hang on error processing (https://github.com/ansible/ansible/pull/38194) - ios_interface - neighbors option now include CDP neighbors (https://github.com/ansible/ansible/pull/37667) - ios_l2_interface - fix removal of trunk vlans (https://github.com/ansible/ansible/pull/37389) - Add supported connection in junos module documentation (https://github.com/ansible/ansible/pull/38813) - _nxos_switchport - fix removal of trunk vlans (https://github.com/ansible/ansible/pull/37328) - nxos_l2_interface - fix removal of trunk vlans (https://github.com/ansible/ansible/pull/37336) - nxos_snapshot - fix documentation and add required parameter logic (https://github.com/ansible/ansible/pull/37232, https://github.com/ansible/ansible/pull/37248) - Improve integration test - Ensure each transport test runs only once (https://github.com/ansible/ansible/pull/37462) - nxos_user - Integration test (https://github.com/ansible/ansible/pull/37852) - nxos_bgp_af - Fix UnboundLocalError (https://github.com/ansible/ansible/pull/37610) - nxos_vrf - Fix nxos_vrf issues (https://github.com/ansible/ansible/pull/37092) - nxos_vrf_af - Fix nxos_vrf_af issues (https://github.com/ansible/ansible/pull/37211) - nxos_udld - Fix nxos_udld issues (https://github.com/ansible/ansible/pull/37418) - nxos_vlan - Fix nxos_vlan issues (https://github.com/ansible/ansible/pull/38008) - nxos_vlan - nxos_vlan purge (https://github.com/ansible/ansible/pull/38202) - nxos_aaa_server - Fix nxos_aaa_server (https://github.com/ansible/ansible/pull/38117) - nxos_aaa_server_host - Fix nxos_aaa_server_host (https://github.com/ansible/ansible/pull/38188) - nxos_acl - Fix nxos_acl (https://github.com/ansible/ansible/pull/38283) - nxos_static_route - Fix nxos_static_route (https://github.com/ansible/ansible/pull/37614) - nxos_acl_interface test - Fix nxos_acl_interface test (https://github.com/ansible/ansible/pull/38230) - nxos_igmp - Fix nxos_igmp (https://github.com/ansible/ansible/pull/38496) - nxos_hsrp - Fix nxos_hsrp (https://github.com/ansible/ansible/pull/38410) - nxos_igmp_snooping - Fix nxos_igmp_snooping (https://github.com/ansible/ansible/pull/38566) - nxos_ntp_auth - Fix nxos_ntp_auth issues (https://github.com/ansible/ansible/pull/38824) - nxos_ntp_options - Fix nxos_ntp_options issues (https://github.com/ansible/ansible/pull/38695) - Fix onyx_config action plugin when used on Python 3 https://github.com/ansible/ansible/pull/38343 - openssl_certificate - Handle dump() in check_mode https://github.com/ansible/ansible/pull/38386 - Fix traceback when creating or stopping ovirt vms (https://github.com/ansible/ansible/pull/37249) - Fix for consul_kv idempotence on Python3 https://github.com/ansible/ansible/issues/35893 - Fix csvfile lookup plugin when used on Python3 https://github.com/ansible/ansible/pull/37625 - ec2 - Fix ec2 user_data parameter to properly convert to base64 on python3 (https://github.com/ansible/ansible/pull/37628) - Fix to send and receive bytes over a socket in the haproxy module which was causing tracebacks on Python3 https://github.com/ansible/ansible/pull/35176 - jira module - Fix bytes/text handling for base64 encoding authentication tokens (https://github.com/ansible/ansible/pull/33862) - ansible-pull - fixed a bug checking for changes when we've pulled from the git repository on python3 
https://github.com/ansible/ansible/issues/36962 - Fix bytes/text handling in vagrant dynamic inventory https://github.com/ansible/ansible/pull/37631 - wait_for_connection - Fix python3 compatibility bug (https://github.com/ansible/ansible/pull/37646) - restore stderr output even if script module run is successful (https://github.com/ansible/ansible/pull/38177) - ec2_asg - no longer terminates an instance before creating a replacement (https://github.com/ansible/ansible/pull/36679) - ec2_group - security groups in default VPCs now have a default egress rule (https://github.com/ansible/ansible/pull/38018) - inventory correctly removes hosts from 'ungrouped' group (https://github.com/ansible/ansible/pull/37617) - letsencrypt - fixed domain matching authorization (https://github.com/ansible/ansible/pull/37558) - letsencrypt - improved elliptic curve account key parsing (https://github.com/ansible/ansible/pull/37275) - facts are no longer processed more than once for each action (https://github.com/ansible/ansible/issues/37535) - cs_vpc_offering - only return VPC offerings matching name arg (https://github.com/ansible/ansible/pull/37783) - cs_configuration - filter names inside the module instead of relying on API (https://github.com/ansible/ansible/pull/37910) - various fixes to networking module connection subsystem (https://github.com/ansible/ansible/pull/37529) - ios_* - fixed netconf issues (https://github.com/ansible/ansible/pull/38155) - ovirt_* - various bugfixes (https://github.com/ansible/ansible/pull/38341) - ansible-vault no longer requires '--encrypt-vault-id' with edit (https://github.com/ansible/ansible/pull/35923) - k8s lookup plugin now uses same auth method as other k8s modules (https://github.com/ansible/ansible/pull/37533) - ansible-inventory now properly displays group_var graph (https://github.com/ansible/ansible/pull/38744) - setup - FreeBSD fact gathering no longer fails on missing dmesg, sysctl, etc (https://github.com/ansible/ansible/pull/37194) - inventory scripts now read passwords without byte interpolation (https://github.com/ansible/ansible/pull/35582) - user - fixed password expiration support in FreeBSD - meta - inventory_refresh now works properly on YAML inventory plugins (https://github.com/ansible/ansible/pull/38242) - foreman callback plugin - fixed API options (https://github.com/ansible/ansible/pull/38138) - win_certificate_store - fixed a typo that stopped it from getting the key_storage values - win_copy - Preserve the local tmp folder instead of deleting it so future tasks can use it (https://github.com/ansible/ansible/pull/37964) - powershell - fixed issue with passing in a bool and int to the Windows environment block, also allow special chars in the env key name (https://github.com/ansible/ansible/pull/37215) - Ansible.ModuleUtils.FileUtil - Catch DirectoryNotFoundException with Test-AnsiblePath (https://github.com/ansible/ansible/pull/37968) - win_exec_wrapper - support loading of Windows modules with different line endings than the core modules (https://github.com/ansible/ansible/pull/37291) - win_reboot - fix deprecated warning message to show version in correct spot (https://github.com/ansible/ansible/pull/37898) - win_regedit - wait for garbage collection to finish before trying to unload the hive in case handles didn't unload in time (https://github.com/ansible/ansible/pull/38912) - win_service - Fix bug with win_service not being able to handle special chars like '[' (https://github.com/ansible/ansible/pull/37897) - win_setup - Use
connection name for network interfaces as interface name isn't helpful (https://github.com/ansible/ansible/pull/37327) - win_setup - fix bug where getting the machine SID would take a long time in large domain environments (https://github.com/ansible/ansible/pull/38646) - win_updates - handle if the module fails to load and return the error message (https://github.com/ansible/ansible/pull/38363) - win_uri - do not override existing header when using the ``headers`` key. (https://github.com/ansible/ansible/pull/37845) - win_uri - convert status code values to an int before validating them in server response (https://github.com/ansible/ansible/pull/38080) - windows - display UTF-8 characters correctly in Windows return json (https://github.com/ansible/ansible/pull/37229) - winrm - when managing Kerberos tickets in Ansible, get a forwardable ticket if delegation is set (https://github.com/ansible/ansible/pull/37815) v2.5.0 ====== Major Changes ------------- - Ansible Network improvements * Created new connection plugins ``network_cli`` and ``netconf`` to replace ``connection=local``. ``connection=local`` will continue to work for a number of Ansible releases. * No more ``unable to open shell``. A clear and descriptive message will be displayed in normal ansible-playbook output without needing to enable debug mode * Loads of documentation, see `Ansible for Network Automation Documentation `_. * Refactor common network shared code into package under ``module_utils/network/`` * Filters: Add a filter to convert XML response from a network device to JSON object. * Loads of bug fixes. * Plus lots more. - New simpler and more intuitive 'loop' keyword for task loops. The ``with_`` loops will likely be deprecated in the near future and eventually removed. - Added fact namespacing; from now on facts will be available under ``ansible_facts`` namespace (for example: ``ansible_facts.os_distribution``) without the ``ansible_`` prefix. They will continue to be added into the main namespace directly, but now with a configuration toggle to enable this. This is currently on by default, but in the future it will default to off. - Added a configuration file that a site administrator can use to specify modules to exclude from being used. Minor Changes ------------- - ansible-inventory - now supports a ``--export`` option to preserve group_var data (https://github.com/ansible/ansible/pull/36188) - Added a few new magic vars corresponding to configuration/command line options: ``ansible_diff_mode``, ``ansible_inventory_sources``, ``ansible_limit``, ``ansible_run_tags``, ``ansible_forks`` and ``ansible_skip_tags`` - Updated the bundled copy of the six library to 1.11.0 - Added support to ``become`` ``NT AUTHORITY\System``, ``NT AUTHORITY\LocalService``, and ``NT AUTHORITY\NetworkService`` on Windows hosts - Fixed ``become`` to work with async on Windows hosts - Improved ``become`` elevation process to work on standard Administrator users without disabling UAC on Windows hosts - The jenkins\_plugin and yum\_repository modules had their ``params`` option removed because they circumvented Ansible's option processing. - The combine filter now accepts a list of dicts as well as dicts directly - New CLI options for ansible-inventory, ansible-console and ansible to allow specifying a playbook\_dir to be used for relative search paths. - The ``stat`` and ``win_stat`` modules have changed the default value of ``get_md5`` to ``False`` which will result in the ``md5`` return value not being returned.
This option will be removed altogether in Ansible 2.9. Use ``get_checksum: True`` with ``checksum_algorithm: md5`` to return an md5 hash of the file under the ``checksum`` return value. - The ``osx_say`` module was renamed into ``say``. - Task debugger functionality was moved into ``StrategyBase``, and extended to allow explicit invocation from use of the ``debugger`` keyword. The ``debug`` strategy is still functional, and is now just a trigger to enable this functionality. - The documentation has undergone a major overhaul. Content has been moved into targeted guides; the table of contents has been cleaned up and streamlined; the CSS theme has been updated to a custom version of the most recent ReadTheDocs theme, and the underlying directory structure for the RST files has been reorganized. - The ANSIBLE\_REMOTE\_TMP environment variable has been added to supplement (and override) ANSIBLE\_REMOTE\_TEMP. This matches with the spelling of the config value. ANSIBLE\_REMOTE\_TEMP will be deprecated in the future. - aci_* modules - added signature based authentication - aci_* modules - included dedicated ACI documentation - aci_* modules - improved ACI return values Deprecated Features ------------------- - Apstra's ``aos_*`` modules are deprecated as they do not work with AOS 2.1 or higher. See new modules at `https://github.com/apstra `_. - Previously deprecated 'hostfile' config settings have been 're-deprecated' because previously code did not warn about deprecated configuration settings. - Using Ansible-provided Jinja tests as filters is deprecated and will be removed in Ansible 2.9. - The ``stat`` and ``win_stat`` modules have deprecated ``get_md5`` and the ``md5`` return values. These options will become undocumented in Ansible 2.9 and removed in a later version. - The ``redis_kv`` lookup has been deprecated in favor of new ``redis`` lookup - Passing arbitrary parameters that begin with ``HEADER_`` to the uri module, used for passing http headers, is deprecated. Use the ``headers`` parameter with a dictionary of header names to value instead. This will be removed in Ansible 2.9 - Passing arbitrary parameters to the zfs module to set zfs properties is deprecated. Use the ``extra_zfs_properties`` parameter with a dictionary of property names to values instead. This will be removed in Ansible 2.9. - Use of the AnsibleModule parameter ``check\_invalid\_arguments`` in custom modules is deprecated. In the future, all parameters will be checked to see whether they are listed in the arg spec and an error raised if they are not listed. This behaviour is the current and future default so most custom modules can simply remove ``check\_invalid\_arguments`` if they set it to the default value of True. The ``check\_invalid\_arguments`` parameter will be removed in Ansible 2.9. - The nxos\_ip\_interface module is deprecated in Ansible 2.5. Use nxos\_l3\_interface module instead. - The nxos\_portchannel module is deprecated in Ansible 2.5. Use nxos\_linkagg module instead. - The nxos\_switchport module is deprecated in Ansible 2.5. Use nxos\_l2\_interface module instead. - The ec2\_ami\_find has been deprecated; use ec2\_ami\_facts instead. - panos\_security\_policy: Use panos\_security\_rule - the old module uses deprecated API calls - vsphere\_guest is deprecated in Ansible 2.5 and will be removed in Ansible-2.9. Use vmware\_guest module instead. Removed Features (previously deprecated) ---------------------------------------- - accelerate. 
- boundary\_meter: There was no deprecation period for this but the hosted service it relied on has gone away so the module has been removed. `#29387 `__ - cl\_ : cl\_interface, cl\_interface\_policy, cl\_bridge, cl\_img\_install, cl\_ports, cl\_license, cl\_bond. Use ``nclu`` instead - docker. Use docker\_container and docker\_image instead. - ec2\_vpc. - ec2\_ami\_search, use ec2\_ami\_facts instead. - nxos\_mtu. Use nxos\_system's ``system_mtu`` option instead. To specify an interface's MTU use nxos\_interface. - panos\_nat\_policy: Use panos\_nat\_rule - the old module uses deprecated API calls New Lookup Plugins ------------------ - aws\_account\_attribute: Query AWS account attributes such as EC2-Classic availability - aws\_service\_ip\_ranges: Query AWS IP ranges for services such as EC2/S3 - aws\_ssm: Query AWS ssm data - config: Lookup Ansible settings - conjur\_variable: Fetch credentials from CyberArk Conjur - k8s: Query the K8s API - nios: Query Infoblox NIOS objects - openshift: Return info from Openshift installation - redis: look up data from Redis DB, deprecates the redis\_kv one. New Callback Plugins -------------------- - null - unixy - yaml New Connection Plugins ---------------------- - kubectl - oc - netconf - network\_cli - The existing network\_cli and netconf connection plugins can now be used directly with network modules. See `Network Best Practices for Ansible 2.5 `_ for more details. New Filter Plugins ------------------ - parse\_xml New Modules ----------- - Cloud (amazon) - aws\_acm\_facts - aws\_application\_scaling\_policy - aws\_az\_facts - aws\_batch\_compute\_environment - aws\_batch\_job\_definition - aws\_batch\_job\_queue - aws\_direct\_connect\_gateway - aws\_direct\_connect\_virtual\_interface - aws\_elasticbeanstalk\_app - aws\_kms\_facts - aws\_region\_facts - aws\_s3\_cors - aws\_ses\_identity - aws\_ssm\_parameter\_store - aws\_waf\_condition - aws\_waf\_rule - aws\_waf\_web\_acl - cloudfront\_distribution - cloudfront\_invalidation - cloudfront\_origin\_access\_identity - cloudwatchlogs\_log\_group - cloudwatchlogs\_log\_group\_facts - ec2\_ami\_facts - ec2\_asg\_lifecycle\_hook - ec2\_customer\_gateway\_facts - ec2\_instance - ec2\_placement\_group - ec2\_placement\_group\_facts - ec2\_vpc\_egress\_igw - ecs\_taskdefinition\_facts - elasticache\_facts - elb\_target - iam\_role\_facts - iam\_user - Cloud (azure) - azure\_rm\_containerinstance - azure\_rm\_containerregistry - azure\_rm\_image - azure\_rm\_keyvault - azure\_rm\_keyvaultkey - azure\_rm\_keyvaultsecret - azure\_rm\_mysqldatabase - azure\_rm\_mysqlserver - azure\_rm\_postgresqldatabase - azure\_rm\_postgresqlserver - azure\_rm\_sqldatabase - azure\_rm\_sqlserver - azure\_rm\_sqlserver\_facts - Cloud (cloudstack) - cs\_network\_offering - cs\_service\_offering - cs\_vpc\_offering - cs\_vpn\_connection - cs\_vpn\_customer\_gateway - Cloud (digital\_ocean) - digital\_ocean\_certificate - digital\_ocean\_floating\_ip\_facts - digital\_ocean\_sshkey\_facts - Cloud (google) - gcp\_dns\_managed\_zone - Cloud (misc) - cloudscale\_floating\_ip - spotinst\_aws\_elastigroup - terraform - Cloud (oneandone) - oneandone\_firewall\_policy - oneandone\_load\_balancer - oneandone\_monitoring\_policy - oneandone\_private\_network - oneandone\_public\_ip - oneandone\_server - Cloud (openstack) - os\_keystone\_endpoint - os\_project\_access - Cloud (ovirt) - ovirt\_api\_facts - ovirt\_disk\_facts - Cloud (vmware) - vcenter\_folder - vmware\_cfg\_backup - vmware\_datastore\_facts - vmware\_drs\_rule\_facts
- vmware\_guest\_file\_operation - vmware\_guest\_powerstate - vmware\_host\_acceptance - vmware\_host\_config\_facts - vmware\_host\_config\_manager - vmware\_host\_datastore - vmware\_host\_dns\_facts - vmware\_host\_facts - vmware\_host\_firewall\_facts - vmware\_host\_firewall\_manager - vmware\_host\_lockdown - vmware\_host\_ntp - vmware\_host\_package\_facts - vmware\_host\_service\_facts - vmware\_host\_service\_manager - vmware\_host\_vmnic\_facts - vmware\_local\_role\_manager - vmware\_vm\_vm\_drs\_rule - vmware\_vmkernel\_facts - Cloud (vultr) - vr\_account\_facts - vr\_dns\_domain - vr\_dns\_record - vr\_firewall\_group - vr\_firewall\_rule - vr\_server - vr\_ssh\_key - vr\_startup\_script - vr\_user - Clustering - etcd3 - k8s - k8s\_raw - k8s\_scale - openshift - openshift\_raw - openshift\_scale - Crypto - openssl\_dhparam - Database - influxdb - influxdb\_query - influxdb\_user - influxdb\_write - Identity - ipa - ipa\_dnszone - ipa\_service - ipa\_subca - keycloak - keycloak\_client - keycloak\_clienttemplate - Monitoring - grafana\_dashboard - grafana\_datasource - grafana\_plugin - icinga2\_host - zabbix - zabbix\_proxy - zabbix\_template - Net Tools - ip\_netns - nios - nios\_dns\_view - nios\_host\_record - nios\_network - nios\_network\_view - nios\_zone - Network (aci) - aci\_aaa\_user - aci\_aaa\_user\_certificate - aci\_access\_port\_to\_interface\_policy\_leaf\_profile - aci\_aep\_to\_domain - aci\_domain - aci\_domain\_to\_encap\_pool - aci\_domain\_to\_vlan\_pool - aci\_encap\_pool - aci\_encap\_pool\_range - aci\_fabric\_node - aci\_firmware\_source - aci\_interface\_policy\_leaf\_policy\_group - aci\_interface\_policy\_leaf\_profile - aci\_interface\_selector\_to\_switch\_policy\_leaf\_profile - aci\_static\_binding\_to\_epg - aci\_switch\_leaf\_selector - aci\_switch\_policy\_leaf\_profile - aci\_switch\_policy\_vpc\_protection\_group - aci\_vlan\_pool - aci\_vlan\_pool\_encap\_block - Network (avi) - avi\_api\_version - avi\_clusterclouddetails - avi\_customipamdnsprofile - avi\_errorpagebody - avi\_errorpageprofile - avi\_gslbservice\_patch\_member - avi\_wafpolicy - avi\_wafprofile - Network (dimension data) - dimensiondata\_vlan - Network (edgeos) - edgeos\_command - edgeos\_config - edgeos\_facts - Network (enos) - enos\_command - enos\_config - enos\_facts - Network (eos) - eos\_interface - eos\_l2\_interface - eos\_l3\_interface - eos\_linkagg - eos\_lldp - eos\_static\_route - Network (f5) - bigip\_asm\_policy - bigip\_device\_connectivity - bigip\_device\_group - bigip\_device\_group\_member - bigip\_device\_httpd - bigip\_device\_trust - bigip\_gtm\_server - bigip\_iapplx\_package - bigip\_monitor\_http - bigip\_monitor\_https - bigip\_monitor\_snmp\_dca - bigip\_monitor\_udp - bigip\_partition - bigip\_policy - bigip\_policy\_rule - bigip\_profile\_client\_ssl - bigip\_remote\_syslog - bigip\_security\_address\_list - bigip\_security\_port\_list - bigip\_software\_update - bigip\_ssl\_key - bigip\_static\_route - bigip\_traffic\_group - bigip\_ucs\_fetch - bigip\_vcmp\_guest - bigip\_wait - bigiq\_regkey\_license - bigiq\_regkey\_pool - Network (fortimanager) - fmgr\_script - Network (ios) - ios\_l2\_interface - ios\_l3\_interface - ios\_linkagg - ios\_lldp - ios\_vlan - Network (iosxr) - iosxr\_netconf - Network (ironware) - ironware\_command - ironware\_config - ironware\_facts - Network (junos) - junos\_l2\_interface - junos\_scp - Network (netact) - netact\_cm\_command - Network (netscaler) - netscaler\_nitro\_request - Network (nso) - 
nso\_action - nso\_config - nso\_query - nso\_show - nso\_verify - Network (nxos) - nxos\_l2\_interface - nxos\_l3\_interface - nxos\_linkagg - nxos\_lldp - Network (onyx) - onyx\_bgp - onyx\_command - onyx\_config - onyx\_facts - onyx\_interface - onyx\_l2\_interface - onyx\_l3\_interface - onyx\_linkagg - onyx\_lldp - onyx\_lldp\_interface - onyx\_magp - onyx\_mlag\_ipl - onyx\_mlag\_vip - onyx\_ospf - onyx\_pfc\_interface - onyx\_protocol - onyx\_vlan - Network (panos) - panos\_dag\_tags - panos\_match\_rule - panos\_op - panos\_query\_rules - Network (radware) - vdirect\_commit - vdirect\_runnable - Network (vyos) - vyos\_vlan - Notification - logentries\_msg - say - snow\_record - Packaging - os - package\_facts - rhsm\_repository - Remote Management (manageiq) - manageiq\_alert\_profiles - manageiq\_alerts - manageiq\_policies - manageiq\_tags - Remote Management (oneview) - oneview\_datacenter\_facts - oneview\_enclosure\_facts - oneview\_logical\_interconnect\_group - oneview\_logical\_interconnect\_group\_facts - oneview\_san\_manager\_facts - Remote Management (ucs) - ucs\_ip\_pool - ucs\_lan\_connectivity - ucs\_mac\_pool - ucs\_san\_connectivity - ucs\_vhba\_template - ucs\_vlans - ucs\_vnic\_template - ucs\_vsans - ucs\_wwn\_pool - System - mksysb - nosh - service\_facts - vdo - Web Infrastructure - jenkins\_job\_facts - Windows - win\_audit\_policy\_system - win\_audit\_rule - win\_certificate\_store - win\_disk\_facts - win\_product\_facts - win\_scheduled\_task\_stat - win\_whoami Bugfixes -------- - tower_* modules - fix credentials to work with v1 and v2 of Ansible Tower API - azure_rm modules - updated with internal changes to use API profiles and kwargs for future Azure Stack support and better stability between SDK updates. (https://github.com/ansible/ansible/pull/35538) - fixed memory bloat on nested includes by preventing blocks from self-parenting (https://github.com/ansible/ansible/pull/36075) - updated to ensure displayed messages under persistent connections are returned to the controller (https://github.com/ansible/ansible/pull/36064) - docker_container, docker_image, docker_network modules - Update to work with Docker SDK 3.1 - edgeos_facts - fix error when there are no commit revisions (https://github.com/ansible/ansible/issues/37123) - eos_vrf and eos_eapi - fixed vrf parsing (https://github.com/ansible/ansible/pull/35791) - include_role - improved performance and recursion depth (https://github.com/ansible/ansible/pull/36470) - interfaces_file - now accepts interfaces without address family or method (https://github.com/ansible/ansible/pull/34200) - lineinfile - fixed insertion if pattern already exists (https://github.com/ansible/ansible/pull/33393) - lineinfile - fixed regexp used with insert(before|after) inserting duplicate lines (https://github.com/ansible/ansible/pull/36156) - Connection error messages may contain characters that jinja2 would interpret as a template.
Wrap the error string so this doesn't happen (https://github.com/ansible/ansible/pull/37329) - nxos_evpn_vni - fixed a number of issues (https://github.com/ansible/ansible/pull/35930) - nxos_igmp_interface - fixed response handling for different nxos versions (https://github.com/ansible/ansible/pull/35959) - nxos_interface_ospf - added various bugfixes (https://github.com/ansible/ansible/pull/35988) - Fix onyx_linkagg module writing debugging information to a tempfile on the remote machine (https://github.com/ansible/ansible/pull/37308) - openshift modules - updated to client version 0.4.0 (https://github.com/ansible/ansible/pull/35127) - setup.py - Ensure we install ansible-config and ansible-inventory with `pip install -e` (https://github.com/ansible/ansible/pull/37151) - Fix for ansible_*_interpreter on Python3 when using non-newstyle modules. Those include old-style ansible modules and Ansible modules written in non-python scripting languages (https://github.com/ansible/ansible/pull/36541) - Fix bytes/text handling in maven_artifact that was causing tracebacks on Python3 - znode - fixed a bug calling the zookeeper API under Python3 https://github.com/ansible/ansible/pull/36999 - Fix for unarchive when users use the --strip-components extra_opt to tar causing ansible to set permissions on the wrong directory. (https://github.com/ansible/ansible/pull/37048) - fixed templating issues in loop_control (https://github.com/ansible/ansible/pull/36124) - ansible-config - fixed traceback when no config file is present (https://github.com/ansible/ansible/issues/35965) - added various fixes to Linux virtualization facts (https://github.com/ansible/ansible/issues/36038) - fixed failure when remote_tmp is a subdir of a system tempdir (https://github.com/ansible/ansible/pull/36143) - ios_ping - updated to allow for count > 70 (https://github.com/ansible/ansible/pull/36142) - fix for ansible-vault always requesting passwords (https://github.com/ansible/ansible/issues/33027) - ios CLI - fixed prompt detection (https://github.com/ansible/ansible/issues/35662) - nxos_user - fixed structured output issue (https://github.com/ansible/ansible/pull/36193) - nxos_* modules - various fixes (https://github.com/ansible/ansible/pull/36340) - nxos_* modules - various fixes (https://github.com/ansible/ansible/pull/36374) - nxos_install_os - kickstart_image_file is no longer required (https://github.com/ansible/ansible/pull/36319) - script/patch - fixed tempfile ownership issues (https://github.com/ansible/ansible/issues/36398) - nxos_bgp_neighbor - fixed various module arg issues (https://github.com/ansible/ansible/pull/36318) - vyos_l3_interface - fixed issues with multiple addresses on an interface (https://github.com/ansible/ansible/pull/36377) - nxos_banner - fixed issues with unstructured output (https://github.com/ansible/ansible/pull/36411) - nxos_bgp_neighbor_af - fixed various issues (https://github.com/ansible/ansible/pull/36472) - vyos_config - fixed IndexError in sanitize_config (https://github.com/ansible/ansible/pull/36375) - cs_user - fixed user_api_secret return for ACS 4.10+ (https://github.com/ansible/ansible/pull/36447) - nxos_* modules - various fixes (https://github.com/ansible/ansible/pull/36514) - fix cases where INVENTORY_UNPARSED_IS_FAILED didn't fail (https://github.com/ansible/ansible/issues/36034) - aws_ses_identity - fixed failure on missing identity info (https://github.com/ansible/ansible/issues/36065) - ec2_vpc_net_facts - fixed traceback for regions other than us-east-1 
(https://github.com/ansible/ansible/pull/35302) - aws_waf_* - fixed traceback on WAFStaleDataException (https://github.com/ansible/ansible/pull/36405) - ec2_group - fixed check_mode when using tags (https://github.com/ansible/ansible/pull/36503) - loop item labels will now update if templated (https://github.com/ansible/ansible/pull/36430) - (network)_vlan / (network)_vrf - decouple config/state check (https://github.com/ansible/ansible/pull/36704) - nxos_vlan / nxos_linkagg - fixed various issues (https://github.com/ansible/ansible/pull/36711) - nios - allow ib_spec attrs to be filtered in update (https://github.com/ansible/ansible/pull/36673) - nso_config / nso_verify - fixed various issues (https://github.com/ansible/ansible/pull/36583) - cs_sshkeypair - fixed ssh key rename (https://github.com/ansible/ansible/pull/36726) - cliconf - fixed get_config traceback (https://github.com/ansible/ansible/pull/36682) - ipmi_boot - added floppy option (https://github.com/ansible/ansible/pull/36174) - nso_config - fixed ordering issues (https://github.com/ansible/ansible/pull/36774) - nxos_facts - fixed ipv6 parsing issues on new nxos releases (https://github.com/ansible/ansible/pull/36796) - nso_config - fixed dependency sort cycle issue (https://github.com/ansible/ansible/pull/36828) - ovirt_* - various fixes (https://github.com/ansible/ansible/pull/36828) - aws_ssm_parameter_store - added no_log to value arg (https://github.com/ansible/ansible/pull/36828) - openshift_raw - fixed creation of RoleBinding resources (https://github.com/ansible/ansible/pull/36887) - nxos_interface - fixed multiple issues (https://github.com/ansible/ansible/pull/36827) - junos_command - fixed Python3 issues (https://github.com/ansible/ansible/pull/36782) - ios_static_route - fixed idempotence issue (https://github.com/ansible/ansible/pull/35912) - terraform - fixed typo in module result stdout value (https://github.com/ansible/ansible/pull/37253) - setup - ensure that `ansible_lo` is properly nested under ansible_facts (https://github.com/ansible/ansible/pull/37360) - vmware_guest_snapshot - updated to always check for root snapshot (https://github.com/ansible/ansible/pull/36001) - vyos - added fixes to check mode support (https://github.com/ansible/ansible/pull/35977) - vyos_l3_interface - added support for localhost (https://github.com/ansible/ansible/pull/36141) - win_domain_controller - updated to only specify ReadOnlyReplica when necessary (https://github.com/ansible/ansible/pull/36017) - win_feature - will display a more helpful error when it fails during execution (https://github.com/ansible/ansible/pull/36491) - win_lineinfile - fixed issue where \r and \n as a string were converted to a newline (https://github.com/ansible/ansible/pull/35100) - win_updates - fixed regression with string category names (https://github.com/ansible/ansible/pull/36015) - win_uri - return response info and content on a non-200 response - win_uri - fixed issues with the creates and removes options (https://github.com/ansible/ansible/pull/36016) - win_wait_for - fixed issue when trying to check a local port when the port is not available externally ansible-2.5.1/changelogs/CHANGELOG.rst0000644000000000000000000000003013265756177017241 0ustar rootroot00000000000000TODO: unified changelog ansible-2.5.1/contrib/0000755000000000000000000000000013265756221014543 5ustar rootroot00000000000000ansible-2.5.1/contrib/inventory/0000755000000000000000000000000013265756221016600 5ustar 
rootroot00000000000000ansible-2.5.1/contrib/inventory/abiquo.ini0000644000000000000000000000315613265756155020574 0ustar rootroot00000000000000# Ansible external inventory script settings for Abiquo # # Define an Abiquo user with access to the Abiquo API which will be used to # perform the required queries to obtain the information needed to generate # the Ansible inventory output. # [auth] apiuser = admin apipass = xabiquo # Specify the Abiquo API version in major.minor format and the access URI to # the API endpoint. Tested versions are: 2.6, 3.0 and 3.1 # To confirm that your box has access to the Abiquo API you can perform a # curl command, replacing with suitable values, similar to this: # curl -X GET https://192.168.2.100/api/login -u admin:xabiquo # [api] version = 3.0 uri = https://192.168.2.100/api # You probably won't need to modify login preferences, but just in case login_path = /login login_type = application/vnd.abiquo.user+json # To avoid performing excessive calls to the Abiquo API you can define a # cache for the plugin output. Within the time defined in seconds, the latest # output will be reused. After that time, the cache will be refreshed. # [cache] cache_max_age = 30 cache_dir = /tmp [defaults] # Depending on your Abiquo environment, you may want to use only public IP # addresses (if using public cloud providers) or also private IP addresses. # You can set this with the public_ip_only configuration. public_ip_only = false # default_net_interface is only used if public_ip_only = false # If public_ip_only is set to false, you can choose the default nic to obtain # the IP address used to define the host. default_net_interface = nic0 # Only deployed VMs are displayed in the plugin output. deployed_only = true # Define if VM metadata is obtained from the Abiquo API. get_metadata = false ansible-2.5.1/contrib/inventory/abiquo.py0000755000000000000000000002120213265756155020440 0ustar rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- ''' External inventory script for Abiquo ==================================== Shamelessly copied from an existing inventory script. This script generates an inventory that Ansible can understand by making API requests to the Abiquo API Requires some python libraries; ensure they are installed when using this script. This script has been tested with Abiquo 3.0 but it may also work with Abiquo 2.6. Before using this script you may want to modify the abiquo.ini config file. This script generates an Ansible hosts file with these host groups: ABQ_xxx: Defines a host by its Abiquo VM name label all: Contains all hosts defined in Abiquo user's enterprise virtualdatacenter: Creates a host group for each virtualdatacenter containing all hosts defined on it virtualappliance: Creates a host group for each virtualappliance containing all hosts defined on it imagetemplate: Creates a host group for each image template containing all hosts using it ''' # (c) 2014, Daniel Beneyto # # This file is part of Ansible, # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see .
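# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of the original script): the JSON shape
# this inventory emits, following the group layout described in the docstring
# above and the structure built in generate_inv_from_api() below. All group,
# VM and IP names here are hypothetical examples.
#
# {
#     "all":         {"children": ["web01"], "hosts": []},
#     "my_vapp":     {"children": ["web01"], "hosts": []},
#     "my_vdc":      {"children": ["web01"], "hosts": []},
#     "my_template": {"children": ["web01"], "hosts": []},
#     "web01":       ["10.60.12.10"],
#     "_meta":       {"hostvars": {"10.60.12.10": {"some_key": "some_value"}}}
# }
# ---------------------------------------------------------------------------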
import os import sys import time import ConfigParser try: import json except ImportError: import simplejson as json from ansible.module_utils.urls import open_url def api_get(link, config): try: if link is None: url = config.get('api', 'uri') + config.get('api', 'login_path') headers = {"Accept": config.get('api', 'login_type')} else: url = link['href'] + '?limit=0' headers = {"Accept": link['type']} result = open_url(url, headers=headers, url_username=config.get('auth', 'apiuser').replace('\n', ''), url_password=config.get('auth', 'apipass').replace('\n', '')) return json.loads(result.read()) except: return None def save_cache(data, config): ''' saves item to cache ''' dpath = config.get('cache', 'cache_dir') try: cache = open('/'.join([dpath, 'inventory']), 'w') cache.write(json.dumps(data)) cache.close() except IOError as e: pass # not really sure what to do here def get_cache(cache_item, config): ''' returns cached item ''' dpath = config.get('cache', 'cache_dir') inv = {} try: cache = open('/'.join([dpath, 'inventory']), 'r') inv = cache.read() cache.close() except IOError as e: pass # not really sure what to do here return inv def cache_available(config): ''' checks if we have a 'fresh' cache available for item requested ''' if config.has_option('cache', 'cache_dir'): dpath = config.get('cache', 'cache_dir') try: existing = os.stat('/'.join([dpath, 'inventory'])) except: # cache doesn't exist or isn't accessible return False if config.has_option('cache', 'cache_max_age'): maxage = config.get('cache', 'cache_max_age') if (int(time.time()) - int(existing.st_mtime)) <= int(maxage): return True return False def generate_inv_from_api(enterprise_entity, config): try: inventory['all'] = {} inventory['all']['children'] = [] inventory['all']['hosts'] = [] inventory['_meta'] = {} inventory['_meta']['hostvars'] = {} enterprise = api_get(enterprise_entity, config) vms_entity = next(link for link in enterprise['links'] if link['rel'] == 'virtualmachines') vms = api_get(vms_entity, config) for vmcollection in vms['collection']: for link in vmcollection['links']: if link['rel'] == 'virtualappliance': vm_vapp = link['title'].replace('[', '').replace(']', '').replace(' ', '_') elif link['rel'] == 'virtualdatacenter': vm_vdc = link['title'].replace('[', '').replace(']', '').replace(' ', '_') elif link['rel'] == 'virtualmachinetemplate': vm_template = link['title'].replace('[', '').replace(']', '').replace(' ', '_') # From abiquo.ini: Only adding to inventory VMs with public IP if config.getboolean('defaults', 'public_ip_only') is True: for link in vmcollection['links']: if link['type'] == 'application/vnd.abiquo.publicip+json' and link['rel'] == 'ip': vm_nic = link['title'] break else: vm_nic = None # Otherwise, assigning defined network interface IP address else: for link in vmcollection['links']: if link['rel'] == config.get('defaults', 'default_net_interface'): vm_nic = link['title'] break else: vm_nic = None vm_state = True # From abiquo.ini: Only adding to inventory VMs deployed if config.getboolean('defaults', 'deployed_only') is True and vmcollection['state'] == 'NOT_ALLOCATED': vm_state = False if vm_nic is not None and vm_state: if vm_vapp not in inventory: inventory[vm_vapp] = {} inventory[vm_vapp]['children'] = [] inventory[vm_vapp]['hosts'] = [] if vm_vdc not in inventory: inventory[vm_vdc] = {} inventory[vm_vdc]['hosts'] = [] inventory[vm_vdc]['children'] = [] if vm_template not in inventory: inventory[vm_template] = {} inventory[vm_template]['children'] = [] 
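# like the vapp and vdc groups above, the template group carries both
# 'children' and 'hosts' keys, so the emitted JSON matches the group
# layout Ansible expects from a dynamic inventory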
inventory[vm_template]['hosts'] = [] if config.getboolean('defaults', 'get_metadata') is True: meta_entity = next(link for link in vmcollection['links'] if link['rel'] == 'metadata') try: metadata = api_get(meta_entity, config) if (config.getfloat("api", "version") >= 3.0): vm_metadata = metadata['metadata'] else: vm_metadata = metadata['metadata']['metadata'] inventory['_meta']['hostvars'][vm_nic] = vm_metadata except Exception as e: pass inventory[vm_vapp]['children'].append(vmcollection['name']) inventory[vm_vdc]['children'].append(vmcollection['name']) inventory[vm_template]['children'].append(vmcollection['name']) inventory['all']['children'].append(vmcollection['name']) inventory[vmcollection['name']] = [] inventory[vmcollection['name']].append(vm_nic) return inventory except Exception as e: # Return empty hosts output return {'all': {'hosts': []}, '_meta': {'hostvars': {}}} def get_inventory(enterprise, config): ''' Reads the inventory from cache or Abiquo api ''' if cache_available(config): # the cached copy is already a JSON string, so return it as-is # (dumping it again would double-encode it) return get_cache('inventory', config) # MAKE ABIQUO API CALLS # inv = generate_inv_from_api(enterprise, config) save_cache(inv, config) return json.dumps(inv) if __name__ == '__main__': inventory = {} enterprise = {} # Read config config = ConfigParser.SafeConfigParser() # strip the '.py' suffix with splitext (rstrip would strip characters, not a suffix) for configfilename in [os.path.splitext(os.path.abspath(sys.argv[0]))[0] + '.ini', 'abiquo.ini']: if os.path.exists(configfilename): config.read(configfilename) break try: login = api_get(None, config) enterprise = next(link for link in login['links'] if link['rel'] == 'enterprise') except Exception as e: enterprise = None if cache_available(config): inventory = get_cache('inventory', config) else: inventory = get_inventory(enterprise, config) # return to ansible sys.stdout.write(str(inventory)) sys.stdout.flush() ansible-2.5.1/contrib/inventory/apache-libcloud.py0000755000000000000000000002666113265756155022206 0ustar rootroot00000000000000#!/usr/bin/env python # (c) 2013, Sebastien Goasguen # # This file is part of Ansible, # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . ###################################################################### ''' Apache Libcloud generic external inventory script ================================= Generates inventory that Ansible can understand by making API requests to Cloud providers using the Apache libcloud library.
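Settings are read from a libcloud.ini file. A minimal sketch of one follows
(illustrative values only; the option names are the ones read_settings()
below consumes, and host, secure and path are included because
read_settings() passes them straight to the driver constructor):

    [driver]
    provider = EC2
    key = <your access key>
    secret = <your secret key>
    secure = True
    host = ec2.us-east-1.amazonaws.com
    path = /

    [cache]
    cache_path = /tmp
    cache_max_age = 300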
This script also assumes there is a libcloud.ini file alongside it ''' import sys import os import argparse import re from time import time import ConfigParser from six import iteritems, string_types from libcloud.compute.types import Provider from libcloud.compute.providers import get_driver import libcloud.security as sec try: import json except ImportError: import simplejson as json class LibcloudInventory(object): def __init__(self): ''' Main execution path ''' # Inventory grouped by instance IDs, tags, security groups, regions, # and availability zones self.inventory = {} # Index of hostname (address) to instance ID self.index = {} # Read settings and parse CLI arguments self.read_settings() self.parse_cli_args() # Cache if self.args.refresh_cache: self.do_api_calls_update_cache() elif not self.is_cache_valid(): self.do_api_calls_update_cache() # Data to print if self.args.host: data_to_print = self.get_host_info() elif self.args.list: # Display list of instances for inventory if len(self.inventory) == 0: data_to_print = self.get_inventory_from_cache() else: data_to_print = self.json_format_dict(self.inventory, True) print(data_to_print) def is_cache_valid(self): ''' Determines if the cache files have expired, or if it is still valid ''' if os.path.isfile(self.cache_path_cache): mod_time = os.path.getmtime(self.cache_path_cache) current_time = time() if (mod_time + self.cache_max_age) > current_time: if os.path.isfile(self.cache_path_index): return True return False def read_settings(self): ''' Reads the settings from the libcloud.ini file ''' config = ConfigParser.SafeConfigParser() libcloud_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'libcloud.ini') libcloud_ini_path = os.environ.get('LIBCLOUD_INI_PATH', libcloud_default_ini_path) config.read(libcloud_ini_path) if not config.has_section('driver'): raise ValueError('libcloud.ini file must contain a [driver] section') if config.has_option('driver', 'provider'): self.provider = config.get('driver', 'provider') else: raise ValueError('libcloud.ini does not have a provider defined') if config.has_option('driver', 'key'): self.key = config.get('driver', 'key') else: raise ValueError('libcloud.ini does not have a key defined') if config.has_option('driver', 'secret'): self.secret = config.get('driver', 'secret') else: raise ValueError('libcloud.ini does not have a secret defined') if config.has_option('driver', 'host'): self.host = config.get('driver', 'host') if config.has_option('driver', 'secure'): self.secure = config.get('driver', 'secure') if config.has_option('driver', 'verify_ssl_cert'): self.verify_ssl_cert = config.get('driver', 'verify_ssl_cert') if config.has_option('driver', 'port'): self.port = config.get('driver', 'port') if config.has_option('driver', 'path'): self.path = config.get('driver', 'path') if config.has_option('driver', 'api_version'): self.api_version = config.get('driver', 'api_version') Driver = get_driver(getattr(Provider, self.provider)) self.conn = Driver(key=self.key, secret=self.secret, secure=self.secure, host=self.host, path=self.path) # Cache related cache_path = config.get('cache', 'cache_path') self.cache_path_cache = cache_path + "/ansible-libcloud.cache" self.cache_path_index = cache_path + "/ansible-libcloud.index" self.cache_max_age = config.getint('cache', 'cache_max_age') def parse_cli_args(self): ''' Command line argument processing ''' parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on libcloud supported providers') 
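# The three flags below form the classic Ansible dynamic-inventory contract:
# --list (the default) prints the whole inventory as JSON, --host prints the
# variables for a single instance, and --refresh-cache bypasses the cache
# files and queries the provider again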
parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)') parser.add_argument('--host', action='store', help='Get all the variables about a specific instance') parser.add_argument('--refresh-cache', action='store_true', default=False, help='Force refresh of cache by making API requests to libcloud supported providers (default: False - use cache files)') self.args = parser.parse_args() def do_api_calls_update_cache(self): ''' Do API calls to a location, and save data in cache files ''' self.get_nodes() self.write_to_cache(self.inventory, self.cache_path_cache) self.write_to_cache(self.index, self.cache_path_index) def get_nodes(self): ''' Gets the list of all nodes ''' for node in self.conn.list_nodes(): self.add_node(node) def get_node(self, node_id): ''' Gets details about a specific node ''' return [node for node in self.conn.list_nodes() if node.id == node_id][0] def add_node(self, node): ''' Adds a node to the inventory and index, as long as it is addressable ''' # Only want running instances if node.state != 0: return # Select the best destination address; None when the node has no public IPs dest = node.public_ips[0] if node.public_ips else None if not dest: # Skip instances we cannot address (e.g. private VPC subnet) return # Add to index; store the node id, since get_node() looks nodes up by id self.index[dest] = node.id # Inventory: Group by instance ID (always a group of 1) self.inventory[node.name] = [dest] ''' # Inventory: Group by region self.push(self.inventory, region, dest) # Inventory: Group by availability zone self.push(self.inventory, node.placement, dest) # Inventory: Group by instance type self.push(self.inventory, self.to_safe('type_' + node.instance_type), dest) ''' # Inventory: Group by key pair if node.extra['key_name']: self.push(self.inventory, self.to_safe('key_' + node.extra['key_name']), dest) # Inventory: Group by security group, quick thing to handle single sg if node.extra['security_group']: self.push(self.inventory, self.to_safe('sg_' + node.extra['security_group'][0]), dest) # Inventory: Group by tag if node.extra['tags']: for tagkey in node.extra['tags'].keys(): self.push(self.inventory, self.to_safe('tag_' + tagkey + '_' + node.extra['tags'][tagkey]), dest) def get_host_info(self): ''' Get variables about a specific host ''' if len(self.index) == 0: # Need to load index from cache self.load_index_from_cache() if self.args.host not in self.index: # try updating the cache self.do_api_calls_update_cache() if self.args.host not in self.index: # host might not exist anymore return self.json_format_dict({}, True) node_id = self.index[self.args.host] node = self.get_node(node_id) instance_vars = {} for key, value in vars(node).items(): key = self.to_safe('ec2_' + key) # Handle complex types if isinstance(value, (int, bool)): instance_vars[key] = value elif isinstance(value, string_types): instance_vars[key] = value.strip() elif value is None: instance_vars[key] = '' elif key == 'ec2_region': instance_vars[key] = value.name elif key == 'ec2_tags': for k, v in iteritems(value): key = self.to_safe('ec2_tag_' + k) instance_vars[key] = v elif key == 'ec2_groups': group_ids = [] group_names = [] for group in value: group_ids.append(group.id) group_names.append(group.name) instance_vars["ec2_security_group_ids"] = ','.join(group_ids) instance_vars["ec2_security_group_names"] = ','.join(group_names) else: pass # TODO Product codes if someone finds them useful # print(key) # print(type(value)) # print(value) return self.json_format_dict(instance_vars, True) def push(self, my_dict, key, element): ''' Pushes an
element onto an array that may not have been defined in the dict ''' if key in my_dict: my_dict[key].append(element) else: my_dict[key] = [element] def get_inventory_from_cache(self): ''' Reads the inventory from the cache file and returns it as a JSON object ''' cache = open(self.cache_path_cache, 'r') json_inventory = cache.read() return json_inventory def load_index_from_cache(self): ''' Reads the index from the cache file sets self.index ''' cache = open(self.cache_path_index, 'r') json_index = cache.read() self.index = json.loads(json_index) def write_to_cache(self, data, filename): ''' Writes data in JSON format to a file ''' json_data = self.json_format_dict(data, True) cache = open(filename, 'w') cache.write(json_data) cache.close() def to_safe(self, word): ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups ''' return re.sub(r"[^A-Za-z0-9\-]", "_", word) def json_format_dict(self, data, pretty=False): ''' Converts a dict to a JSON object and dumps it as a formatted string ''' if pretty: return json.dumps(data, sort_keys=True, indent=2) else: return json.dumps(data) def main(): LibcloudInventory() if __name__ == '__main__': main() ansible-2.5.1/contrib/inventory/apstra_aos.ini0000644000000000000000000000106213265756155021442 0ustar rootroot00000000000000# Ansible Apstra AOS external inventory script settings # Dynamic Inventory script parameter can be provided using this file # Or by using Environment Variables: # - AOS_SERVER, AOS_PORT, AOS_USERNAME, AOS_PASSWORD, AOS_BLUEPRINT # # This file takes precedence over the Environment Variables # [aos] # aos_server = 172.20.62.3 # port = 8888 # username = admin # password = admin ## Blueprint Mode # to use the inventory in mode Blueprint, you need to define the blueprint name you want to use # blueprint = my-blueprint-l2 # blueprint_interface = true ansible-2.5.1/contrib/inventory/apstra_aos.py0000755000000000000000000004765713265756155021342 0ustar rootroot00000000000000#!/usr/bin/env python # # (c) 2017 Apstra Inc, # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . # """ Apstra AOS external inventory script ==================================== Ansible has a feature where instead of reading from /etc/ansible/hosts as a text file, it can query external programs to obtain the list of hosts, groups the hosts are in, and even variables to assign to each host. To use this: - copy this file over /etc/ansible/hosts and chmod +x the file. 
- Copy both files (.py and .ini) in your preferred directory More information about Ansible Dynamic Inventory here http://unix.stackexchange.com/questions/205479/in-ansible-dynamic-inventory-json-can-i-render-hostvars-based-on-the-hostname 2 modes are currently, supported: **device based** or **blueprint based**: - For **Device based**, the list of device is taken from the global device list the serial ID will be used as the inventory_hostname - For **Blueprint based**, the list of device is taken from the given blueprint the Node name will be used as the inventory_hostname Input parameters parameter can be provided using either with the ini file or by using Environment Variables: The following list of Environment Variables are supported: AOS_SERVER, AOS_PORT, AOS_USERNAME, AOS_PASSWORD, AOS_BLUEPRINT The config file takes precedence over the Environment Variables Tested with Apstra AOS 1.1 This script has been inspired by the cobbler.py inventory. thanks Author: Damien Garros (@dgarros) Version: 0.2.0 """ import json import os import re import sys try: import argparse HAS_ARGPARSE = True except ImportError: HAS_ARGPARSE = False try: from apstra.aosom.session import Session HAS_AOS_PYEZ = True except ImportError: HAS_AOS_PYEZ = False from ansible.module_utils.six.moves import configparser """ ## Expected output format in Device mode { "Cumulus": { "hosts": [ "52540073956E", "52540022211A" ], "vars": {} }, "EOS": { "hosts": [ "5254001CAFD8", "525400DDDF72" ], "vars": {} }, "Generic Model": { "hosts": [ "525400E5486D" ], "vars": {} }, "Ubuntu GNU/Linux": { "hosts": [ "525400E5486D" ], "vars": {} }, "VX": { "hosts": [ "52540073956E", "52540022211A" ], "vars": {} }, "_meta": { "hostvars": { "5254001CAFD8": { "agent_start_time": "2017-02-03T00:49:16.000000Z", "ansible_ssh_host": "172.20.52.6", "aos_hcl_model": "Arista_vEOS", "aos_server": "", "aos_version": "AOS_1.1.1_OB.5", "comm_state": "on", "device_start_time": "2017-02-03T00:47:58.454480Z", "domain_name": "", "error_message": "", "fqdn": "localhost", "hostname": "localhost", "hw_model": "vEOS", "hw_version": "", "is_acknowledged": false, "mgmt_ifname": "Management1", "mgmt_ipaddr": "172.20.52.6", "mgmt_macaddr": "52:54:00:1C:AF:D8", "os_arch": "x86_64", "os_family": "EOS", "os_version": "4.16.6M", "os_version_info": { "build": "6M", "major": "4", "minor": "16" }, "serial_number": "5254001CAFD8", "state": "OOS-QUARANTINED", "vendor": "Arista" }, "52540022211A": { "agent_start_time": "2017-02-03T00:45:22.000000Z", "ansible_ssh_host": "172.20.52.7", "aos_hcl_model": "Cumulus_VX", "aos_server": "172.20.52.3", "aos_version": "AOS_1.1.1_OB.5", "comm_state": "on", "device_start_time": "2017-02-03T00:45:11.019189Z", "domain_name": "", "error_message": "", "fqdn": "cumulus", "hostname": "cumulus", "hw_model": "VX", "hw_version": "", "is_acknowledged": false, "mgmt_ifname": "eth0", "mgmt_ipaddr": "172.20.52.7", "mgmt_macaddr": "52:54:00:22:21:1a", "os_arch": "x86_64", "os_family": "Cumulus", "os_version": "3.1.1", "os_version_info": { "build": "1", "major": "3", "minor": "1" }, "serial_number": "52540022211A", "state": "OOS-QUARANTINED", "vendor": "Cumulus" }, "52540073956E": { "agent_start_time": "2017-02-03T00:45:19.000000Z", "ansible_ssh_host": "172.20.52.8", "aos_hcl_model": "Cumulus_VX", "aos_server": "172.20.52.3", "aos_version": "AOS_1.1.1_OB.5", "comm_state": "on", "device_start_time": "2017-02-03T00:45:11.030113Z", "domain_name": "", "error_message": "", "fqdn": "cumulus", "hostname": "cumulus", "hw_model": "VX", "hw_version": "", 
"is_acknowledged": false, "mgmt_ifname": "eth0", "mgmt_ipaddr": "172.20.52.8", "mgmt_macaddr": "52:54:00:73:95:6e", "os_arch": "x86_64", "os_family": "Cumulus", "os_version": "3.1.1", "os_version_info": { "build": "1", "major": "3", "minor": "1" }, "serial_number": "52540073956E", "state": "OOS-QUARANTINED", "vendor": "Cumulus" }, "525400DDDF72": { "agent_start_time": "2017-02-03T00:49:07.000000Z", "ansible_ssh_host": "172.20.52.5", "aos_hcl_model": "Arista_vEOS", "aos_server": "", "aos_version": "AOS_1.1.1_OB.5", "comm_state": "on", "device_start_time": "2017-02-03T00:47:46.929921Z", "domain_name": "", "error_message": "", "fqdn": "localhost", "hostname": "localhost", "hw_model": "vEOS", "hw_version": "", "is_acknowledged": false, "mgmt_ifname": "Management1", "mgmt_ipaddr": "172.20.52.5", "mgmt_macaddr": "52:54:00:DD:DF:72", "os_arch": "x86_64", "os_family": "EOS", "os_version": "4.16.6M", "os_version_info": { "build": "6M", "major": "4", "minor": "16" }, "serial_number": "525400DDDF72", "state": "OOS-QUARANTINED", "vendor": "Arista" }, "525400E5486D": { "agent_start_time": "2017-02-02T18:44:42.000000Z", "ansible_ssh_host": "172.20.52.4", "aos_hcl_model": "Generic_Server_1RU_1x10G", "aos_server": "172.20.52.3", "aos_version": "AOS_1.1.1_OB.5", "comm_state": "on", "device_start_time": "2017-02-02T21:11:25.188734Z", "domain_name": "", "error_message": "", "fqdn": "localhost", "hostname": "localhost", "hw_model": "Generic Model", "hw_version": "pc-i440fx-trusty", "is_acknowledged": false, "mgmt_ifname": "eth0", "mgmt_ipaddr": "172.20.52.4", "mgmt_macaddr": "52:54:00:e5:48:6d", "os_arch": "x86_64", "os_family": "Ubuntu GNU/Linux", "os_version": "14.04 LTS", "os_version_info": { "build": "", "major": "14", "minor": "04" }, "serial_number": "525400E5486D", "state": "OOS-QUARANTINED", "vendor": "Generic Manufacturer" } } }, "all": { "hosts": [ "5254001CAFD8", "52540073956E", "525400DDDF72", "525400E5486D", "52540022211A" ], "vars": {} }, "vEOS": { "hosts": [ "5254001CAFD8", "525400DDDF72" ], "vars": {} } } """ def fail(msg): sys.stderr.write("%s\n" % msg) sys.exit(1) class AosInventory(object): def __init__(self): """ Main execution path """ if not HAS_AOS_PYEZ: raise Exception('aos-pyez is not installed. Please see details here: https://github.com/Apstra/aos-pyez') if not HAS_ARGPARSE: raise Exception('argparse is not installed. 
Please install the argparse library or upgrade to python-2.7') # Initialize inventory self.inventory = dict() # A list of groups and the hosts in that group self.inventory['_meta'] = dict() self.inventory['_meta']['hostvars'] = dict() # Read settings and parse CLI arguments self.read_settings() self.parse_cli_args() # ---------------------------------------------------- # Open session to AOS # ---------------------------------------------------- aos = Session(server=self.aos_server, port=self.aos_server_port, user=self.aos_username, passwd=self.aos_password) aos.login() # Save session information in variables of group all self.add_var_to_group('all', 'aos_session', aos.session) # Add the AOS server itself in the inventory self.add_host_to_group("all", 'aos') self.add_var_to_host("aos", "ansible_ssh_host", self.aos_server) self.add_var_to_host("aos", "ansible_ssh_pass", self.aos_password) self.add_var_to_host("aos", "ansible_ssh_user", self.aos_username) # ---------------------------------------------------- # Build the inventory # 2 modes are supported: device based or blueprint based # - For device based, the list of device is taken from the global device list # the serial ID will be used as the inventory_hostname # - For Blueprint based, the list of device is taken from the given blueprint # the Node name will be used as the inventory_hostname # ---------------------------------------------------- if self.aos_blueprint: bp = aos.Blueprints[self.aos_blueprint] if bp.exists is False: fail("Unable to find the Blueprint: %s" % self.aos_blueprint) for dev_name, dev_id in bp.params['devices'].value.items(): self.add_host_to_group('all', dev_name) device = aos.Devices.find(uid=dev_id) if 'facts' in device.value.keys(): self.add_device_facts_to_var(dev_name, device) # Define admin State and Status if 'user_config' in device.value.keys(): if 'admin_state' in device.value['user_config'].keys(): self.add_var_to_host(dev_name, 'admin_state', device.value['user_config']['admin_state']) self.add_device_status_to_var(dev_name, device) # Go over the contents data structure for node in bp.contents['system']['nodes']: if node['display_name'] == dev_name: self.add_host_to_group(node['role'], dev_name) # Check for additional attribute to import attributes_to_import = [ 'loopback_ip', 'asn', 'role', 'position', ] for attr in attributes_to_import: if attr in node.keys(): self.add_var_to_host(dev_name, attr, node[attr]) # if blueprint_interface is enabled in the configuration # Collect links information if self.aos_blueprint_int: interfaces = dict() for link in bp.contents['system']['links']: # each link has 2 sides [0,1], and it's unknown which one match this device # at first we assume, first side match(0) and peer is (1) peer_id = 1 for side in link['endpoints']: if side['display_name'] == dev_name: # import local information first int_name = side['interface'] # init dict interfaces[int_name] = dict() if 'ip' in side.keys(): interfaces[int_name]['ip'] = side['ip'] if 'interface' in side.keys(): interfaces[int_name]['name'] = side['interface'] if 'display_name' in link['endpoints'][peer_id].keys(): interfaces[int_name]['peer'] = link['endpoints'][peer_id]['display_name'] if 'ip' in link['endpoints'][peer_id].keys(): interfaces[int_name]['peer_ip'] = link['endpoints'][peer_id]['ip'] if 'type' in link['endpoints'][peer_id].keys(): interfaces[int_name]['peer_type'] = link['endpoints'][peer_id]['type'] else: # if we haven't match the first time, prepare the peer_id # for the second loop iteration peer_id = 0 
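                    # All link endpoints have been inspected at this point; expose
                    # the collected per-interface details (ip, name, peer, peer_ip,
                    # peer_type) as a single 'interfaces' host variable.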
self.add_var_to_host(dev_name, 'interfaces', interfaces) else: for device in aos.Devices: # If not reacheable, create by key and # If reacheable, create by hostname self.add_host_to_group('all', device.name) # populate information for this host self.add_device_status_to_var(device.name, device) if 'user_config' in device.value.keys(): for key, value in device.value['user_config'].items(): self.add_var_to_host(device.name, key, value) # Based on device status online|offline, collect facts as well if device.value['status']['comm_state'] == 'on': if 'facts' in device.value.keys(): self.add_device_facts_to_var(device.name, device) # Check if device is associated with a blueprint # if it's create a new group if 'blueprint_active' in device.value['status'].keys(): if 'blueprint_id' in device.value['status'].keys(): bp = aos.Blueprints.find(uid=device.value['status']['blueprint_id']) if bp: self.add_host_to_group(bp.name, device.name) # ---------------------------------------------------- # Convert the inventory and return a JSON String # ---------------------------------------------------- data_to_print = "" data_to_print += self.json_format_dict(self.inventory, True) print(data_to_print) def read_settings(self): """ Reads the settings from the apstra_aos.ini file """ config = configparser.ConfigParser() config.read(os.path.dirname(os.path.realpath(__file__)) + '/apstra_aos.ini') # Default Values self.aos_blueprint = False self.aos_blueprint_int = True self.aos_username = 'admin' self.aos_password = 'admin' self.aos_server_port = 8888 # Try to reach all parameters from File, if not available try from ENV try: self.aos_server = config.get('aos', 'aos_server') except: if 'AOS_SERVER' in os.environ.keys(): self.aos_server = os.environ['AOS_SERVER'] try: self.aos_server_port = config.get('aos', 'port') except: if 'AOS_PORT' in os.environ.keys(): self.aos_server_port = os.environ['AOS_PORT'] try: self.aos_username = config.get('aos', 'username') except: if 'AOS_USERNAME' in os.environ.keys(): self.aos_username = os.environ['AOS_USERNAME'] try: self.aos_password = config.get('aos', 'password') except: if 'AOS_PASSWORD' in os.environ.keys(): self.aos_password = os.environ['AOS_PASSWORD'] try: self.aos_blueprint = config.get('aos', 'blueprint') except: if 'AOS_BLUEPRINT' in os.environ.keys(): self.aos_blueprint = os.environ['AOS_BLUEPRINT'] try: if config.get('aos', 'blueprint_interface') in ['false', 'no']: self.aos_blueprint_int = False except: pass def parse_cli_args(self): """ Command line argument processing """ parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Apstra AOS') parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)') parser.add_argument('--host', action='store', help='Get all the variables about a specific instance') self.args = parser.parse_args() def json_format_dict(self, data, pretty=False): """ Converts a dict to a JSON object and dumps it as a formatted string """ if pretty: return json.dumps(data, sort_keys=True, indent=2) else: return json.dumps(data) def add_host_to_group(self, group, host): # Cleanup group name first clean_group = self.cleanup_group_name(group) # Check if the group exist, if not initialize it if clean_group not in self.inventory.keys(): self.inventory[clean_group] = {} self.inventory[clean_group]['hosts'] = [] self.inventory[clean_group]['vars'] = {} self.inventory[clean_group]['hosts'].append(host) def add_var_to_host(self, host, var, value): # Check if the host exist, if 
not initialize it if host not in self.inventory['_meta']['hostvars'].keys(): self.inventory['_meta']['hostvars'][host] = {} self.inventory['_meta']['hostvars'][host][var] = value def add_var_to_group(self, group, var, value): # Cleanup group name first clean_group = self.cleanup_group_name(group) # Check if the group exist, if not initialize it if clean_group not in self.inventory.keys(): self.inventory[clean_group] = {} self.inventory[clean_group]['hosts'] = [] self.inventory[clean_group]['vars'] = {} self.inventory[clean_group]['vars'][var] = value def add_device_facts_to_var(self, device_name, device): # Populate variables for this host self.add_var_to_host(device_name, 'ansible_ssh_host', device.value['facts']['mgmt_ipaddr']) self.add_var_to_host(device_name, 'id', device.id) # self.add_host_to_group('all', device.name) for key, value in device.value['facts'].items(): self.add_var_to_host(device_name, key, value) if key == 'os_family': self.add_host_to_group(value, device_name) elif key == 'hw_model': self.add_host_to_group(value, device_name) def cleanup_group_name(self, group_name): """ Clean up group name by : - Replacing all non-alphanumeric caracter by underscore - Converting to lowercase """ rx = re.compile(r'\W+') clean_group = rx.sub('_', group_name).lower() return clean_group def add_device_status_to_var(self, device_name, device): if 'status' in device.value.keys(): for key, value in device.value['status'].items(): self.add_var_to_host(device.name, key, value) # Run the script if __name__ == '__main__': AosInventory() ansible-2.5.1/contrib/inventory/azure_rm.ini0000644000000000000000000000143613265756155021137 0ustar rootroot00000000000000# # Configuration file for azure_rm.py # [azure] # Control which resource groups are included. By default all resources groups are included. # Set resource_groups to a comma separated list of resource groups names. #resource_groups= # Control which tags are included. Set tags to a comma separated list of keys or key:value pairs #tags= # Control which locations are included. Set locations to a comma separated list (e.g. eastus,eastus2,westus) #locations= # Include powerstate. If you don't need powerstate information, turning it off improves runtime performance. include_powerstate=yes # Control grouping with the following boolean flags. Valid values: yes, no, true, false, True, False, 0, 1. group_by_resource_group=yes group_by_location=yes group_by_security_group=yes group_by_tag=yes ansible-2.5.1/contrib/inventory/azure_rm.py0000755000000000000000000010463513265756155021020 0ustar rootroot00000000000000#!/usr/bin/env python # # Copyright (c) 2016 Matt Davis, # Chris Houseknecht, # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . # ''' Azure External Inventory Script =============================== Generates dynamic inventory by making API requests to the Azure Resource Manager using the Azure Python SDK. 
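A minimum SDK version of 2.0.0 is assumed (see AZURE_MIN_VERSION below).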
For instruction on installing the Azure Python SDK see http://azure-sdk-for-python.readthedocs.org/ Authentication -------------- The order of precedence is command line arguments, environment variables, and finally the [default] profile found in ~/.azure/credentials. If using a credentials file, it should be an ini formatted file with one or more sections, which we refer to as profiles. The script looks for a [default] section, if a profile is not specified either on the command line or with an environment variable. The keys in a profile will match the list of command line arguments below. For command line arguments and environment variables specify a profile found in your ~/.azure/credentials file, or a service principal or Active Directory user. Command line arguments: - profile - client_id - secret - subscription_id - tenant - ad_user - password - cloud_environment Environment variables: - AZURE_PROFILE - AZURE_CLIENT_ID - AZURE_SECRET - AZURE_SUBSCRIPTION_ID - AZURE_TENANT - AZURE_AD_USER - AZURE_PASSWORD - AZURE_CLOUD_ENVIRONMENT Run for Specific Host ----------------------- When run for a specific host using the --host option, a resource group is required. For a specific host, this script returns the following variables: { "ansible_host": "XXX.XXX.XXX.XXX", "computer_name": "computer_name2", "fqdn": null, "id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Compute/virtualMachines/object-name", "image": { "offer": "CentOS", "publisher": "OpenLogic", "sku": "7.1", "version": "latest" }, "location": "westus", "mac_address": "00-00-5E-00-53-FE", "name": "object-name", "network_interface": "interface-name", "network_interface_id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Network/networkInterfaces/object-name1", "network_security_group": null, "network_security_group_id": null, "os_disk": { "name": "object-name", "operating_system_type": "Linux" }, "plan": null, "powerstate": "running", "private_ip": "172.26.3.6", "private_ip_alloc_method": "Static", "provisioning_state": "Succeeded", "public_ip": "XXX.XXX.XXX.XXX", "public_ip_alloc_method": "Static", "public_ip_id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Network/publicIPAddresses/object-name", "public_ip_name": "object-name", "resource_group": "galaxy-production", "security_group": "object-name", "security_group_id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Network/networkSecurityGroups/object-name", "tags": { "db": "database" }, "type": "Microsoft.Compute/virtualMachines", "virtual_machine_size": "Standard_DS4" } Groups ------ When run in --list mode, instances are grouped by the following categories: - azure - location - resource_group - security_group - tag key - tag key_value Control groups using azure_rm.ini or set environment variables: AZURE_GROUP_BY_RESOURCE_GROUP=yes AZURE_GROUP_BY_LOCATION=yes AZURE_GROUP_BY_SECURITY_GROUP=yes AZURE_GROUP_BY_TAG=yes Select hosts within specific resource groups by assigning a comma separated list to: AZURE_RESOURCE_GROUPS=resource_group_a,resource_group_b Select hosts for specific tag key by assigning a comma separated list of tag keys to: AZURE_TAGS=key1,key2,key3 Select hosts for specific locations: AZURE_LOCATIONS=eastus,westus,eastus2 Or, select hosts for specific tag key:value pairs by assigning a comma separated list key:value pairs to: AZURE_TAGS=key1:value1,key2:value2 If you don't need the powerstate, you can improve 
performance by turning off powerstate fetching: AZURE_INCLUDE_POWERSTATE=no azure_rm.ini ------------ As mentioned above, you can control execution using environment variables or a .ini file. A sample azure_rm.ini is included. The name of the .ini file is the basename of the inventory script (in this case 'azure_rm') with a .ini extension. It also assumes the .ini file is alongside the script. To specify a different path for the .ini file, define the AZURE_INI_PATH environment variable: export AZURE_INI_PATH=/path/to/custom.ini Powerstate: ----------- The powerstate attribute indicates whether or not a host is running. If the value is 'running', the machine is up. If the value is anything other than 'running', the machine is down, and will be unreachable. Examples: --------- Execute /bin/uname on all instances in the galaxy-qa resource group $ ansible -i azure_rm.py galaxy-qa -m shell -a "/bin/uname -a" Use the inventory script to print instance specific information $ contrib/inventory/azure_rm.py --host my_instance_host_name --pretty Use with a playbook $ ansible-playbook -i contrib/inventory/azure_rm.py my_playbook.yml --limit galaxy-qa Insecure Platform Warning ------------------------- If you receive InsecurePlatformWarning from urllib3, install the requests security packages: pip install requests[security] author: - Chris Houseknecht (@chouseknecht) - Matt Davis (@nitzmahone) Company: Ansible by Red Hat Version: 1.0.0 ''' import argparse import json import os import re import sys import inspect try: # python2 import ConfigParser as cp except ImportError: # python3 import configparser as cp from packaging.version import Version from os.path import expanduser import ansible.module_utils.six.moves.urllib.parse as urlparse HAS_AZURE = True HAS_AZURE_EXC = None try: from msrestazure.azure_exceptions import CloudError from msrestazure import azure_cloud from azure.mgmt.compute import __version__ as azure_compute_version from azure.common import AzureMissingResourceHttpError, AzureHttpError from azure.common.credentials import ServicePrincipalCredentials, UserPassCredentials from azure.mgmt.network import NetworkManagementClient from azure.mgmt.resource.resources import ResourceManagementClient from azure.mgmt.compute import ComputeManagementClient except ImportError as exc: HAS_AZURE_EXC = exc HAS_AZURE = False AZURE_CREDENTIAL_ENV_MAPPING = dict( profile='AZURE_PROFILE', subscription_id='AZURE_SUBSCRIPTION_ID', client_id='AZURE_CLIENT_ID', secret='AZURE_SECRET', tenant='AZURE_TENANT', ad_user='AZURE_AD_USER', password='AZURE_PASSWORD', cloud_environment='AZURE_CLOUD_ENVIRONMENT', ) AZURE_CONFIG_SETTINGS = dict( resource_groups='AZURE_RESOURCE_GROUPS', tags='AZURE_TAGS', locations='AZURE_LOCATIONS', include_powerstate='AZURE_INCLUDE_POWERSTATE', group_by_resource_group='AZURE_GROUP_BY_RESOURCE_GROUP', group_by_location='AZURE_GROUP_BY_LOCATION', group_by_security_group='AZURE_GROUP_BY_SECURITY_GROUP', group_by_tag='AZURE_GROUP_BY_TAG' ) AZURE_MIN_VERSION = "2.0.0" def azure_id_to_dict(id): pieces = re.sub(r'^\/', '', id).split('/') result = {} index = 0 while index < len(pieces) - 1: result[pieces[index]] = pieces[index + 1] index += 1 return result class AzureRM(object): def __init__(self, args): self._args = args self._cloud_environment = None self._compute_client = None self._resource_client = None self._network_client = None self.debug = False if args.debug: self.debug = True self.credentials = self._get_credentials(args) if not self.credentials: self.fail("Failed to get credentials. 
Either pass as parameters, set environment variables, " "or define a profile in ~/.azure/credentials.") # if cloud_environment specified, look up/build Cloud object raw_cloud_env = self.credentials.get('cloud_environment') if not raw_cloud_env: self._cloud_environment = azure_cloud.AZURE_PUBLIC_CLOUD # SDK default else: # try to look up "well-known" values via the name attribute on azure_cloud members all_clouds = [x[1] for x in inspect.getmembers(azure_cloud) if isinstance(x[1], azure_cloud.Cloud)] matched_clouds = [x for x in all_clouds if x.name == raw_cloud_env] if len(matched_clouds) == 1: self._cloud_environment = matched_clouds[0] elif len(matched_clouds) > 1: self.fail("Azure SDK failure: more than one cloud matched for cloud_environment name '{0}'".format(raw_cloud_env)) else: if not urlparse.urlparse(raw_cloud_env).scheme: self.fail("cloud_environment must be an endpoint discovery URL or one of {0}".format([x.name for x in all_clouds])) try: self._cloud_environment = azure_cloud.get_cloud_from_metadata_endpoint(raw_cloud_env) except Exception as e: self.fail("cloud_environment {0} could not be resolved: {1}".format(raw_cloud_env, e.message)) if self.credentials.get('subscription_id', None) is None: self.fail("Credentials did not include a subscription_id value.") self.log("setting subscription_id") self.subscription_id = self.credentials['subscription_id'] if self.credentials.get('client_id') is not None and \ self.credentials.get('secret') is not None and \ self.credentials.get('tenant') is not None: self.azure_credentials = ServicePrincipalCredentials(client_id=self.credentials['client_id'], secret=self.credentials['secret'], tenant=self.credentials['tenant'], cloud_environment=self._cloud_environment) elif self.credentials.get('ad_user') is not None and self.credentials.get('password') is not None: tenant = self.credentials.get('tenant') if not tenant: tenant = 'common' self.azure_credentials = UserPassCredentials(self.credentials['ad_user'], self.credentials['password'], tenant=tenant, cloud_environment=self._cloud_environment) else: self.fail("Failed to authenticate with provided credentials. Some attributes were missing. " "Credentials must include client_id, secret and tenant or ad_user and password.") def log(self, msg): if self.debug: print(msg + u'\n') def fail(self, msg): raise Exception(msg) def _get_profile(self, profile="default"): path = expanduser("~") path += "/.azure/credentials" try: config = cp.ConfigParser() config.read(path) except Exception as exc: self.fail("Failed to access {0}. Check that the file exists and you have read " "access. {1}".format(path, str(exc))) credentials = dict() for key in AZURE_CREDENTIAL_ENV_MAPPING: try: credentials[key] = config.get(profile, key, raw=True) except: pass if credentials.get('client_id') is not None or credentials.get('ad_user') is not None: return credentials return None def _get_env_credentials(self): env_credentials = dict() for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items(): env_credentials[attribute] = os.environ.get(env_variable, None) if env_credentials['profile'] is not None: credentials = self._get_profile(env_credentials['profile']) return credentials if env_credentials['client_id'] is not None or env_credentials['ad_user'] is not None: return env_credentials return None def _get_credentials(self, params): # Get authentication credentials. # Precedence: cmd line parameters-> environment variables-> default profile in ~/.azure/credentials. 
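        # For example (the profile name below is purely illustrative): exporting
        # AZURE_PROFILE=staging while passing no credential arguments makes
        # _get_env_credentials() resolve the [staging] section of
        # ~/.azure/credentials, whereas an explicit --profile argument would
        # take precedence over the environment variable.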
self.log('Getting credentials') arg_credentials = dict() for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items(): arg_credentials[attribute] = getattr(params, attribute) # try module params if arg_credentials['profile'] is not None: self.log('Retrieving credentials with profile parameter.') credentials = self._get_profile(arg_credentials['profile']) return credentials if arg_credentials['client_id'] is not None: self.log('Received credentials from parameters.') return arg_credentials if arg_credentials['ad_user'] is not None: self.log('Received credentials from parameters.') return arg_credentials # try environment env_credentials = self._get_env_credentials() if env_credentials: self.log('Received credentials from env.') return env_credentials # try default profile from ~./azure/credentials default_credentials = self._get_profile() if default_credentials: self.log('Retrieved default profile credentials from ~/.azure/credentials.') return default_credentials return None def _register(self, key): try: # We have to perform the one-time registration here. Otherwise, we receive an error the first # time we attempt to use the requested client. resource_client = self.rm_client resource_client.providers.register(key) except Exception as exc: self.log("One-time registration of {0} failed - {1}".format(key, str(exc))) self.log("You might need to register {0} using an admin account".format(key)) self.log(("To register a provider using the Python CLI: " "https://docs.microsoft.com/azure/azure-resource-manager/" "resource-manager-common-deployment-errors#noregisteredproviderfound")) @property def network_client(self): self.log('Getting network client') if not self._network_client: self._network_client = NetworkManagementClient( self.azure_credentials, self.subscription_id, base_url=self._cloud_environment.endpoints.resource_manager, api_version='2017-06-01' ) self._register('Microsoft.Network') return self._network_client @property def rm_client(self): self.log('Getting resource manager client') if not self._resource_client: self._resource_client = ResourceManagementClient( self.azure_credentials, self.subscription_id, base_url=self._cloud_environment.endpoints.resource_manager, api_version='2017-05-10' ) return self._resource_client @property def compute_client(self): self.log('Getting compute client') if not self._compute_client: self._compute_client = ComputeManagementClient( self.azure_credentials, self.subscription_id, base_url=self._cloud_environment.endpoints.resource_manager, api_version='2017-03-30' ) self._register('Microsoft.Compute') return self._compute_client class AzureInventory(object): def __init__(self): self._args = self._parse_cli_args() try: rm = AzureRM(self._args) except Exception as e: sys.exit("{0}".format(str(e))) self._compute_client = rm.compute_client self._network_client = rm.network_client self._resource_client = rm.rm_client self._security_groups = None self.resource_groups = [] self.tags = None self.locations = None self.replace_dash_in_groups = False self.group_by_resource_group = True self.group_by_location = True self.group_by_security_group = True self.group_by_tag = True self.include_powerstate = True self._inventory = dict( _meta=dict( hostvars=dict() ), azure=[] ) self._get_settings() if self._args.resource_groups: self.resource_groups = self._args.resource_groups.split(',') if self._args.tags: self.tags = self._args.tags.split(',') if self._args.locations: self.locations = self._args.locations.split(',') if self._args.no_powerstate: 
self.include_powerstate = False self.get_inventory() print(self._json_format_dict(pretty=self._args.pretty)) sys.exit(0) def _parse_cli_args(self): # Parse command line arguments parser = argparse.ArgumentParser( description='Produce an Ansible Inventory file for an Azure subscription') parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)') parser.add_argument('--debug', action='store_true', default=False, help='Send debug messages to STDOUT') parser.add_argument('--host', action='store', help='Get all information about an instance') parser.add_argument('--pretty', action='store_true', default=False, help='Pretty print JSON output(default: False)') parser.add_argument('--profile', action='store', help='Azure profile contained in ~/.azure/credentials') parser.add_argument('--subscription_id', action='store', help='Azure Subscription Id') parser.add_argument('--client_id', action='store', help='Azure Client Id ') parser.add_argument('--secret', action='store', help='Azure Client Secret') parser.add_argument('--tenant', action='store', help='Azure Tenant Id') parser.add_argument('--ad_user', action='store', help='Active Directory User') parser.add_argument('--password', action='store', help='password') parser.add_argument('--cloud_environment', action='store', help='Azure Cloud Environment name or metadata discovery URL') parser.add_argument('--resource-groups', action='store', help='Return inventory for comma separated list of resource group names') parser.add_argument('--tags', action='store', help='Return inventory for comma separated list of tag key:value pairs') parser.add_argument('--locations', action='store', help='Return inventory for comma separated list of locations') parser.add_argument('--no-powerstate', action='store_true', default=False, help='Do not include the power state of each virtual host') return parser.parse_args() def get_inventory(self): if len(self.resource_groups) > 0: # get VMs for requested resource groups for resource_group in self.resource_groups: try: virtual_machines = self._compute_client.virtual_machines.list(resource_group) except Exception as exc: sys.exit("Error: fetching virtual machines for resource group {0} - {1}".format(resource_group, str(exc))) if self._args.host or self.tags: selected_machines = self._selected_machines(virtual_machines) self._load_machines(selected_machines) else: self._load_machines(virtual_machines) else: # get all VMs within the subscription try: virtual_machines = self._compute_client.virtual_machines.list_all() except Exception as exc: sys.exit("Error: fetching virtual machines - {0}".format(str(exc))) if self._args.host or self.tags or self.locations: selected_machines = self._selected_machines(virtual_machines) self._load_machines(selected_machines) else: self._load_machines(virtual_machines) def _load_machines(self, machines): for machine in machines: id_dict = azure_id_to_dict(machine.id) # TODO - The API is returning an ID value containing resource group name in ALL CAPS. If/when it gets # fixed, we should remove the .lower(). 
Opened Issue # #574: https://github.com/Azure/azure-sdk-for-python/issues/574 resource_group = id_dict['resourceGroups'].lower() if self.group_by_security_group: self._get_security_groups(resource_group) host_vars = dict( ansible_host=None, private_ip=None, private_ip_alloc_method=None, public_ip=None, public_ip_name=None, public_ip_id=None, public_ip_alloc_method=None, fqdn=None, location=machine.location, name=machine.name, type=machine.type, id=machine.id, tags=machine.tags, network_interface_id=None, network_interface=None, resource_group=resource_group, mac_address=None, plan=(machine.plan.name if machine.plan else None), virtual_machine_size=machine.hardware_profile.vm_size, computer_name=(machine.os_profile.computer_name if machine.os_profile else None), provisioning_state=machine.provisioning_state, ) host_vars['os_disk'] = dict( name=machine.storage_profile.os_disk.name, operating_system_type=machine.storage_profile.os_disk.os_type.value ) if self.include_powerstate: host_vars['powerstate'] = self._get_powerstate(resource_group, machine.name) if machine.storage_profile.image_reference: host_vars['image'] = dict( offer=machine.storage_profile.image_reference.offer, publisher=machine.storage_profile.image_reference.publisher, sku=machine.storage_profile.image_reference.sku, version=machine.storage_profile.image_reference.version ) # Add windows details if machine.os_profile is not None and machine.os_profile.windows_configuration is not None: host_vars['ansible_connection'] = 'winrm' host_vars['windows_auto_updates_enabled'] = \ machine.os_profile.windows_configuration.enable_automatic_updates host_vars['windows_timezone'] = machine.os_profile.windows_configuration.time_zone host_vars['windows_rm'] = None if machine.os_profile.windows_configuration.win_rm is not None: host_vars['windows_rm'] = dict(listeners=None) if machine.os_profile.windows_configuration.win_rm.listeners is not None: host_vars['windows_rm']['listeners'] = [] for listener in machine.os_profile.windows_configuration.win_rm.listeners: host_vars['windows_rm']['listeners'].append(dict(protocol=listener.protocol, certificate_url=listener.certificate_url)) for interface in machine.network_profile.network_interfaces: interface_reference = self._parse_ref_id(interface.id) network_interface = self._network_client.network_interfaces.get( interface_reference['resourceGroups'], interface_reference['networkInterfaces']) if network_interface.primary: if self.group_by_security_group and \ self._security_groups[resource_group].get(network_interface.id, None): host_vars['security_group'] = \ self._security_groups[resource_group][network_interface.id]['name'] host_vars['security_group_id'] = \ self._security_groups[resource_group][network_interface.id]['id'] host_vars['network_interface'] = network_interface.name host_vars['network_interface_id'] = network_interface.id host_vars['mac_address'] = network_interface.mac_address for ip_config in network_interface.ip_configurations: host_vars['private_ip'] = ip_config.private_ip_address host_vars['private_ip_alloc_method'] = ip_config.private_ip_allocation_method if ip_config.public_ip_address: public_ip_reference = self._parse_ref_id(ip_config.public_ip_address.id) public_ip_address = self._network_client.public_ip_addresses.get( public_ip_reference['resourceGroups'], public_ip_reference['publicIPAddresses']) host_vars['ansible_host'] = public_ip_address.ip_address host_vars['public_ip'] = public_ip_address.ip_address host_vars['public_ip_name'] = public_ip_address.name 
host_vars['public_ip_alloc_method'] = public_ip_address.public_ip_allocation_method host_vars['public_ip_id'] = public_ip_address.id if public_ip_address.dns_settings: host_vars['fqdn'] = public_ip_address.dns_settings.fqdn self._add_host(host_vars) def _selected_machines(self, virtual_machines): selected_machines = [] for machine in virtual_machines: if self._args.host and self._args.host == machine.name: selected_machines.append(machine) if self.tags and self._tags_match(machine.tags, self.tags): selected_machines.append(machine) if self.locations and machine.location in self.locations: selected_machines.append(machine) return selected_machines def _get_security_groups(self, resource_group): ''' For a given resource_group build a mapping of network_interface.id to security_group name ''' if not self._security_groups: self._security_groups = dict() if not self._security_groups.get(resource_group): self._security_groups[resource_group] = dict() for group in self._network_client.network_security_groups.list(resource_group): if group.network_interfaces: for interface in group.network_interfaces: self._security_groups[resource_group][interface.id] = dict( name=group.name, id=group.id ) def _get_powerstate(self, resource_group, name): try: vm = self._compute_client.virtual_machines.get(resource_group, name, expand='instanceview') except Exception as exc: sys.exit("Error: fetching instanceview for host {0} - {1}".format(name, str(exc))) return next((s.code.replace('PowerState/', '') for s in vm.instance_view.statuses if s.code.startswith('PowerState')), None) def _add_host(self, vars): host_name = self._to_safe(vars['name']) resource_group = self._to_safe(vars['resource_group']) security_group = None if vars.get('security_group'): security_group = self._to_safe(vars['security_group']) if self.group_by_resource_group: if not self._inventory.get(resource_group): self._inventory[resource_group] = [] self._inventory[resource_group].append(host_name) if self.group_by_location: if not self._inventory.get(vars['location']): self._inventory[vars['location']] = [] self._inventory[vars['location']].append(host_name) if self.group_by_security_group and security_group: if not self._inventory.get(security_group): self._inventory[security_group] = [] self._inventory[security_group].append(host_name) self._inventory['_meta']['hostvars'][host_name] = vars self._inventory['azure'].append(host_name) if self.group_by_tag and vars.get('tags'): for key, value in vars['tags'].items(): safe_key = self._to_safe(key) safe_value = safe_key + '_' + self._to_safe(value) if not self._inventory.get(safe_key): self._inventory[safe_key] = [] if not self._inventory.get(safe_value): self._inventory[safe_value] = [] self._inventory[safe_key].append(host_name) self._inventory[safe_value].append(host_name) def _json_format_dict(self, pretty=False): # convert inventory to json if pretty: return json.dumps(self._inventory, sort_keys=True, indent=2) else: return json.dumps(self._inventory) def _get_settings(self): # Load settings from the .ini, if it exists. Otherwise, # look for environment values. 
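        # Note that the two sources are not merged: when a readable .ini is found,
        # its values are used as-is and the corresponding environment variables
        # are ignored entirely.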
file_settings = self._load_settings() if file_settings: for key in AZURE_CONFIG_SETTINGS: if key in ('resource_groups', 'tags', 'locations') and file_settings.get(key): values = file_settings.get(key).split(',') if len(values) > 0: setattr(self, key, values) elif file_settings.get(key): val = self._to_boolean(file_settings[key]) setattr(self, key, val) else: env_settings = self._get_env_settings() for key in AZURE_CONFIG_SETTINGS: if key in('resource_groups', 'tags', 'locations') and env_settings.get(key): values = env_settings.get(key).split(',') if len(values) > 0: setattr(self, key, values) elif env_settings.get(key, None) is not None: val = self._to_boolean(env_settings[key]) setattr(self, key, val) def _parse_ref_id(self, reference): response = {} keys = reference.strip('/').split('/') for index in range(len(keys)): if index < len(keys) - 1 and index % 2 == 0: response[keys[index]] = keys[index + 1] return response def _to_boolean(self, value): if value in ['Yes', 'yes', 1, 'True', 'true', True]: result = True elif value in ['No', 'no', 0, 'False', 'false', False]: result = False else: result = True return result def _get_env_settings(self): env_settings = dict() for attribute, env_variable in AZURE_CONFIG_SETTINGS.items(): env_settings[attribute] = os.environ.get(env_variable, None) return env_settings def _load_settings(self): basename = os.path.splitext(os.path.basename(__file__))[0] default_path = os.path.join(os.path.dirname(__file__), (basename + '.ini')) path = os.path.expanduser(os.path.expandvars(os.environ.get('AZURE_INI_PATH', default_path))) config = None settings = None try: config = cp.ConfigParser() config.read(path) except: pass if config is not None: settings = dict() for key in AZURE_CONFIG_SETTINGS: try: settings[key] = config.get('azure', key, raw=True) except: pass return settings def _tags_match(self, tag_obj, tag_args): ''' Return True if the tags object from a VM contains the requested tag values. :param tag_obj: Dictionary of string:string pairs :param tag_args: List of strings in the form key=value :return: boolean ''' if not tag_obj: return False matches = 0 for arg in tag_args: arg_key = arg arg_value = None if re.search(r':', arg): arg_key, arg_value = arg.split(':') if arg_value and tag_obj.get(arg_key, None) == arg_value: matches += 1 elif not arg_value and tag_obj.get(arg_key, None) is not None: matches += 1 if matches == len(tag_args): return True return False def _to_safe(self, word): ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups ''' regex = r"[^A-Za-z0-9\_" if not self.replace_dash_in_groups: regex += r"\-" return re.sub(regex + "]", "_", word) def main(): if not HAS_AZURE: sys.exit("The Azure python sdk is not installed (try `pip install 'azure>={0}' --upgrade`) - {1}".format(AZURE_MIN_VERSION, HAS_AZURE_EXC)) AzureInventory() if __name__ == '__main__': main() ansible-2.5.1/contrib/inventory/brook.ini0000644000000000000000000000273213265756155020427 0ustar rootroot00000000000000# Copyright 2016 Doalitic. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . # The Brook.io inventory script has the following dependencies: # 1. A working Brook.io account # See https://brook.io # 2. A valid token generated through the 'API token' panel of Brook.io # 3. The libbrook python libray. # See https://github.com/doalitic/libbrook # # Author: Francisco Ros [brook] # Valid API token (required). # E.g. 'Aed342a12A60433697281FeEe1a4037C' # api_token = # Project id within Brook.io, as obtained from the project settings (optional). If provided, the # generated inventory will just include the hosts that belong to such project. Otherwise, it will # include all hosts in projects the requesting user has access to. The response includes groups # 'project_x', being 'x' the project name. # E.g. '2e8e099e1bc34cc0979d97ac34e9577b' # project_id = ansible-2.5.1/contrib/inventory/brook.py0000755000000000000000000002270613265756155020306 0ustar rootroot00000000000000#!/usr/bin/env python # Copyright 2016 Doalitic. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . """ Brook.io external inventory script ================================== Generates inventory that Ansible can understand by making API requests to Brook.io via the libbrook library. Hence, such dependency must be installed in the system to run this script. The default configuration file is named 'brook.ini' and is located alongside this script. You can choose any other file by setting the BROOK_INI_PATH environment variable. If param 'project_id' is left blank in 'brook.ini', the inventory includes all the instances in projects where the requesting user belongs. Otherwise, only instances from the given project are included, provided the requesting user belongs to it. The following variables are established for every host. They can be retrieved from the hostvars dictionary. - brook_pid: str - brook_name: str - brook_description: str - brook_project: str - brook_template: str - brook_region: str - brook_zone: str - brook_status: str - brook_tags: list(str) - brook_internal_ips: list(str) - brook_external_ips: list(str) - brook_created_at - brook_updated_at - ansible_ssh_host Instances are grouped by the following categories: - tag: A group is created for each tag. E.g. groups 'tag_foo' and 'tag_bar' are created if there exist instances with tags 'foo' and/or 'bar'. - project: A group is created for each project. E.g. group 'project_test' is created if a project named 'test' exist. - status: A group is created for each instance state. E.g. groups 'status_RUNNING' and 'status_PENDING' are created if there are instances in running and pending state. 
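For reference, the emitted JSON follows Ansible's standard dynamic inventory
layout; the group and host names below are purely illustrative:

    {
        "project_test": ["web-1"],
        "status_RUNNING": ["web-1"],
        "tag_www": ["web-1"],
        "_meta": {
            "hostvars": {
                "web-1": {
                    "ansible_ssh_host": "198.51.100.10",
                    "brook_status": "RUNNING",
                    "brook_tags": ["www"]
                }
            }
        }
    }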
Examples: Execute uname on all instances in project 'test' $ ansible -i brook.py project_test -m shell -a "/bin/uname -a" Install nginx on all debian web servers tagged with 'www' $ ansible -i brook.py tag_www -m apt -a "name=nginx state=present" Run site.yml playbook on web servers $ ansible-playbook -i brook.py site.yml -l tag_www Support: This script is tested on Python 2.7 and 3.4. It may work on other versions though. Author: Francisco Ros Version: 0.2 """ import sys import os try: from ConfigParser import SafeConfigParser as ConfigParser except ImportError: from configparser import ConfigParser try: import json except ImportError: import simplejson as json try: import libbrook except: sys.exit('Brook.io inventory script requires libbrook. See https://github.com/doalitic/libbrook') class BrookInventory: _API_ENDPOINT = 'https://api.brook.io' def __init__(self): self._configure_from_file() self.client = self.get_api_client() self.inventory = self.get_inventory() def _configure_from_file(self): """Initialize from .ini file. Configuration file is assumed to be named 'brook.ini' and to be located on the same directory than this file, unless the environment variable BROOK_INI_PATH says otherwise. """ brook_ini_default_path = \ os.path.join(os.path.dirname(os.path.realpath(__file__)), 'brook.ini') brook_ini_path = os.environ.get('BROOK_INI_PATH', brook_ini_default_path) config = ConfigParser(defaults={ 'api_token': '', 'project_id': '' }) config.read(brook_ini_path) self.api_token = config.get('brook', 'api_token') self.project_id = config.get('brook', 'project_id') if not self.api_token: sys.exit('You must provide (at least) your Brook.io API token to generate the dynamic ' 'inventory.') def get_api_client(self): """Authenticate user via the provided credentials and return the corresponding API client. """ # Get JWT token from API token # unauthenticated_client = libbrook.ApiClient(host=self._API_ENDPOINT) auth_api = libbrook.AuthApi(unauthenticated_client) api_token = libbrook.AuthTokenRequest() api_token.token = self.api_token jwt = auth_api.auth_token(token=api_token) # Create authenticated API client # return libbrook.ApiClient(host=self._API_ENDPOINT, header_name='Authorization', header_value='Bearer %s' % jwt.token) def get_inventory(self): """Generate Ansible inventory. 
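        Returns a dict mapping each group name ('project_*', 'status_*' and
        'tag_*') to a list of instance names, plus a '_meta' entry holding the
        per-host variables.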
""" groups = dict() meta = dict() meta['hostvars'] = dict() instances_api = libbrook.InstancesApi(self.client) projects_api = libbrook.ProjectsApi(self.client) templates_api = libbrook.TemplatesApi(self.client) # If no project is given, get all projects the requesting user has access to # if not self.project_id: projects = [project.id for project in projects_api.index_projects()] else: projects = [self.project_id] # Build inventory from instances in all projects # for project_id in projects: project = projects_api.show_project(project_id=project_id) for instance in instances_api.index_instances(project_id=project_id): # Get template used for this instance if known template = templates_api.show_template(template_id=instance.template) if instance.template else None # Update hostvars try: meta['hostvars'][instance.name] = \ self.hostvars(project, instance, template, instances_api) except libbrook.rest.ApiException: continue # Group by project project_group = 'project_%s' % project.name if project_group in groups: groups[project_group].append(instance.name) else: groups[project_group] = [instance.name] # Group by status status_group = 'status_%s' % meta['hostvars'][instance.name]['brook_status'] if status_group in groups: groups[status_group].append(instance.name) else: groups[status_group] = [instance.name] # Group by tags tags = meta['hostvars'][instance.name]['brook_tags'] for tag in tags: tag_group = 'tag_%s' % tag if tag_group in groups: groups[tag_group].append(instance.name) else: groups[tag_group] = [instance.name] groups['_meta'] = meta return groups def hostvars(self, project, instance, template, api): """Return the hostvars dictionary for the given instance. Raise libbrook.rest.ApiException if it cannot retrieve all required information from the Brook.io API. 
""" hostvars = instance.to_dict() hostvars['brook_pid'] = hostvars.pop('pid') hostvars['brook_name'] = hostvars.pop('name') hostvars['brook_description'] = hostvars.pop('description') hostvars['brook_project'] = hostvars.pop('project') hostvars['brook_template'] = hostvars.pop('template') hostvars['brook_region'] = hostvars.pop('region') hostvars['brook_zone'] = hostvars.pop('zone') hostvars['brook_created_at'] = hostvars.pop('created_at') hostvars['brook_updated_at'] = hostvars.pop('updated_at') del hostvars['id'] del hostvars['key'] del hostvars['provider'] del hostvars['image'] # Substitute identifiers for names # hostvars['brook_project'] = project.name hostvars['brook_template'] = template.name if template else None # Retrieve instance state # status = api.status_instance(project_id=project.id, instance_id=instance.id) hostvars.update({'brook_status': status.state}) # Retrieve instance tags # tags = api.instance_tags(project_id=project.id, instance_id=instance.id) hostvars.update({'brook_tags': tags}) # Retrieve instance addresses # addresses = api.instance_addresses(project_id=project.id, instance_id=instance.id) internal_ips = [address.address for address in addresses if address.scope == 'internal'] external_ips = [address.address for address in addresses if address.address and address.scope == 'external'] hostvars.update({'brook_internal_ips': internal_ips}) hostvars.update({'brook_external_ips': external_ips}) try: hostvars.update({'ansible_ssh_host': external_ips[0]}) except IndexError: raise libbrook.rest.ApiException(status='502', reason='Instance without public IP') return hostvars # Run the script # brook = BrookInventory() print(json.dumps(brook.inventory)) ansible-2.5.1/contrib/inventory/cloudforms.ini0000644000000000000000000000212613265756155021465 0ustar rootroot00000000000000[cloudforms] # the version of CloudForms ; currently not used, but tested with version = 4.1 # This should be the hostname of the CloudForms server url = https://cfme.example.com # This will more than likely need to be a local CloudForms username username = # The password for said username password = # True = verify SSL certificate / False = trust anything ssl_verify = True # limit the number of vms returned per request limit = 100 # purge the CloudForms actions from hosts purge_actions = True # Clean up group names (from tags and other groupings so Ansible doesn't complain) clean_group_keys = True # Explode tags into nested groups / subgroups nest_tags = False # If set, ensure host name are suffixed with this value # Note: This suffix *must* include the leading '.' as it is appended to the hostname as is # suffix = .example.org # If true, will try and use an IPv4 address for the ansible_ssh_host rather than just the first IP address in the list prefer_ipv4 = False [cache] # Maximum time to trust the cache in seconds max_age = 600 ansible-2.5.1/contrib/inventory/cloudforms.py0000755000000000000000000004554613265756155021356 0ustar rootroot00000000000000#!/usr/bin/env python # vim: set fileencoding=utf-8 : # # Copyright (C) 2016 Guido Günther # # This script is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with it. If not, see . # # This is loosely based on the foreman inventory script # -- Josh Preston # from __future__ import print_function import argparse import ConfigParser import os import re from time import time import requests from requests.auth import HTTPBasicAuth import warnings from ansible.errors import AnsibleError try: import json except ImportError: import simplejson as json class CloudFormsInventory(object): def __init__(self): """ Main execution path """ self.inventory = dict() # A list of groups and the hosts in that group self.hosts = dict() # Details about hosts in the inventory # Parse CLI arguments self.parse_cli_args() # Read settings self.read_settings() # Cache if self.args.refresh_cache or not self.is_cache_valid(): self.update_cache() else: self.load_inventory_from_cache() self.load_hosts_from_cache() data_to_print = "" # Data to print if self.args.host: if self.args.debug: print("Fetching host [%s]" % self.args.host) data_to_print += self.get_host_info(self.args.host) else: self.inventory['_meta'] = {'hostvars': {}} for hostname in self.hosts: self.inventory['_meta']['hostvars'][hostname] = { 'cloudforms': self.hosts[hostname], } # include the ansible_ssh_host in the top level if 'ansible_ssh_host' in self.hosts[hostname]: self.inventory['_meta']['hostvars'][hostname]['ansible_ssh_host'] = self.hosts[hostname]['ansible_ssh_host'] data_to_print += self.json_format_dict(self.inventory, self.args.pretty) print(data_to_print) def is_cache_valid(self): """ Determines if the cache files have expired, or if it is still valid """ if self.args.debug: print("Determining if cache [%s] is still valid (< %s seconds old)" % (self.cache_path_hosts, self.cache_max_age)) if os.path.isfile(self.cache_path_hosts): mod_time = os.path.getmtime(self.cache_path_hosts) current_time = time() if (mod_time + self.cache_max_age) > current_time: if os.path.isfile(self.cache_path_inventory): if self.args.debug: print("Cache is still valid!") return True if self.args.debug: print("Cache is stale or does not exist.") return False def read_settings(self): """ Reads the settings from the cloudforms.ini file """ config = ConfigParser.SafeConfigParser() config_paths = [ os.path.dirname(os.path.realpath(__file__)) + '/cloudforms.ini', "/etc/ansible/cloudforms.ini", ] env_value = os.environ.get('CLOUDFORMS_INI_PATH') if env_value is not None: config_paths.append(os.path.expanduser(os.path.expandvars(env_value))) if self.args.debug: for config_path in config_paths: print("Reading from configuration file [%s]" % config_path) config.read(config_paths) # CloudForms API related if config.has_option('cloudforms', 'url'): self.cloudforms_url = config.get('cloudforms', 'url') else: self.cloudforms_url = None if not self.cloudforms_url: warnings.warn("No url specified, expected something like 'https://cfme.example.com'") if config.has_option('cloudforms', 'username'): self.cloudforms_username = config.get('cloudforms', 'username') else: self.cloudforms_username = None if not self.cloudforms_username: warnings.warn("No username specified, you need to specify a CloudForms username.") if config.has_option('cloudforms', 'password'): self.cloudforms_pw = config.get('cloudforms', 'password', raw=True) else: self.cloudforms_pw = None if not self.cloudforms_pw: warnings.warn("No password specified, you need to specify a password for the CloudForms user.") if 
config.has_option('cloudforms', 'ssl_verify'): self.cloudforms_ssl_verify = config.getboolean('cloudforms', 'ssl_verify') else: self.cloudforms_ssl_verify = True if config.has_option('cloudforms', 'version'): self.cloudforms_version = config.get('cloudforms', 'version') else: self.cloudforms_version = None if config.has_option('cloudforms', 'limit'): self.cloudforms_limit = config.getint('cloudforms', 'limit') else: self.cloudforms_limit = 100 if config.has_option('cloudforms', 'purge_actions'): self.cloudforms_purge_actions = config.getboolean('cloudforms', 'purge_actions') else: self.cloudforms_purge_actions = True if config.has_option('cloudforms', 'clean_group_keys'): self.cloudforms_clean_group_keys = config.getboolean('cloudforms', 'clean_group_keys') else: self.cloudforms_clean_group_keys = True if config.has_option('cloudforms', 'nest_tags'): self.cloudforms_nest_tags = config.getboolean('cloudforms', 'nest_tags') else: self.cloudforms_nest_tags = False if config.has_option('cloudforms', 'suffix'): self.cloudforms_suffix = config.get('cloudforms', 'suffix') if self.cloudforms_suffix[0] != '.': raise AnsibleError('Leading fullstop is required for Cloudforms suffix') else: self.cloudforms_suffix = None if config.has_option('cloudforms', 'prefer_ipv4'): self.cloudforms_prefer_ipv4 = config.getboolean('cloudforms', 'prefer_ipv4') else: self.cloudforms_prefer_ipv4 = False # Ansible related try: group_patterns = config.get('ansible', 'group_patterns') except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): group_patterns = "[]" self.group_patterns = eval(group_patterns) # Cache related try: cache_path = os.path.expanduser(config.get('cache', 'path')) except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): cache_path = '.' (script, ext) = os.path.splitext(os.path.basename(__file__)) self.cache_path_hosts = cache_path + "/%s.hosts" % script self.cache_path_inventory = cache_path + "/%s.inventory" % script self.cache_max_age = config.getint('cache', 'max_age') if self.args.debug: print("CloudForms settings:") print("cloudforms_url = %s" % self.cloudforms_url) print("cloudforms_username = %s" % self.cloudforms_username) print("cloudforms_pw = %s" % self.cloudforms_pw) print("cloudforms_ssl_verify = %s" % self.cloudforms_ssl_verify) print("cloudforms_version = %s" % self.cloudforms_version) print("cloudforms_limit = %s" % self.cloudforms_limit) print("cloudforms_purge_actions = %s" % self.cloudforms_purge_actions) print("Cache settings:") print("cache_max_age = %s" % self.cache_max_age) print("cache_path_hosts = %s" % self.cache_path_hosts) print("cache_path_inventory = %s" % self.cache_path_inventory) def parse_cli_args(self): """ Command line argument processing """ parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on CloudForms managed VMs') parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)') parser.add_argument('--host', action='store', help='Get all the variables about a specific instance') parser.add_argument('--pretty', action='store_true', default=False, help='Pretty print JSON output (default: False)') parser.add_argument('--refresh-cache', action='store_true', default=False, help='Force refresh of cache by making API requests to CloudForms (default: False - use cache files)') parser.add_argument('--debug', action='store_true', default=False, help='Show debug output while running (default: False)') self.args = parser.parse_args() def _get_json(self, url): """ Make a request and 
return the JSON """ results = [] ret = requests.get(url, auth=HTTPBasicAuth(self.cloudforms_username, self.cloudforms_pw), verify=self.cloudforms_ssl_verify) ret.raise_for_status() try: results = json.loads(ret.text) except ValueError: warnings.warn("Unexpected response from {0} ({1}): {2}".format(self.cloudforms_url, ret.status_code, ret.reason)) results = {} if self.args.debug: print("=======================================================================") print("=======================================================================") print("=======================================================================") print(ret.text) print("=======================================================================") print("=======================================================================") print("=======================================================================") return results def _get_hosts(self): """ Get all hosts by paging through the results """ limit = self.cloudforms_limit page = 0 last_page = False results = [] while not last_page: offset = page * limit ret = self._get_json("%s/api/vms?offset=%s&limit=%s&expand=resources,tags,hosts,&attributes=ipaddresses" % (self.cloudforms_url, offset, limit)) results += ret['resources'] if ret['subcount'] < limit: last_page = True page += 1 return results def update_cache(self): """ Make calls to cloudforms and save the output in a cache """ self.groups = dict() self.hosts = dict() if self.args.debug: print("Updating cache...") for host in self._get_hosts(): if self.cloudforms_suffix is not None and not host['name'].endswith(self.cloudforms_suffix): host['name'] = host['name'] + self.cloudforms_suffix # Ignore VMs that are not powered on if host['power_state'] != 'on': if self.args.debug: print("Skipping %s because power_state = %s" % (host['name'], host['power_state'])) continue # purge actions if self.cloudforms_purge_actions and 'actions' in host: del host['actions'] # Create ansible groups for tags if 'tags' in host: # Create top-level group if 'tags' not in self.inventory: self.inventory['tags'] = dict(children=[], vars={}, hosts=[]) if not self.cloudforms_nest_tags: # don't expand tags, just use them in a safe way for group in host['tags']: # Add sub-group, as a child of top-level safe_key = self.to_safe(group['name']) if safe_key: if self.args.debug: print("Adding sub-group '%s' to parent 'tags'" % safe_key) if safe_key not in self.inventory['tags']['children']: self.push(self.inventory['tags'], 'children', safe_key) self.push(self.inventory, safe_key, host['name']) if self.args.debug: print("Found tag [%s] for host which will be mapped to [%s]" % (group['name'], safe_key)) else: # expand the tags into nested groups / sub-groups # Create nested groups for tags safe_parent_tag_name = 'tags' for tag in host['tags']: tag_hierarchy = tag['name'][1:].split('/') if self.args.debug: print("Working on list %s" % tag_hierarchy) for tag_name in tag_hierarchy: if self.args.debug: print("Working on tag_name = %s" % tag_name) safe_tag_name = self.to_safe(tag_name) if self.args.debug: print("Using sanitized name %s" % safe_tag_name) # Create sub-group if safe_tag_name not in self.inventory: self.inventory[safe_tag_name] = dict(children=[], vars={}, hosts=[]) # Add sub-group, as a child of top-level if safe_parent_tag_name: if self.args.debug: print("Adding sub-group '%s' to parent '%s'" % (safe_tag_name, safe_parent_tag_name)) if safe_tag_name not in self.inventory[safe_parent_tag_name]['children']: 
self.push(self.inventory[safe_parent_tag_name], 'children', safe_tag_name) # Make sure the next one uses this one as it's parent safe_parent_tag_name = safe_tag_name # Add the host to the last tag self.push(self.inventory[safe_parent_tag_name], 'hosts', host['name']) # Set ansible_ssh_host to the first available ip address if 'ipaddresses' in host and host['ipaddresses'] and isinstance(host['ipaddresses'], list): # If no preference for IPv4, just use the first entry if not self.cloudforms_prefer_ipv4: host['ansible_ssh_host'] = host['ipaddresses'][0] else: # Before we search for an IPv4 address, set using the first entry in case we don't find any host['ansible_ssh_host'] = host['ipaddresses'][0] for currenthost in host['ipaddresses']: if '.' in currenthost: host['ansible_ssh_host'] = currenthost # Create additional groups for key in ('location', 'type', 'vendor'): safe_key = self.to_safe(host[key]) # Create top-level group if key not in self.inventory: self.inventory[key] = dict(children=[], vars={}, hosts=[]) # Create sub-group if safe_key not in self.inventory: self.inventory[safe_key] = dict(children=[], vars={}, hosts=[]) # Add sub-group, as a child of top-level if safe_key not in self.inventory[key]['children']: self.push(self.inventory[key], 'children', safe_key) if key in host: # Add host to sub-group self.push(self.inventory[safe_key], 'hosts', host['name']) self.hosts[host['name']] = host self.push(self.inventory, 'all', host['name']) if self.args.debug: print("Saving cached data") self.write_to_cache(self.hosts, self.cache_path_hosts) self.write_to_cache(self.inventory, self.cache_path_inventory) def get_host_info(self, host): """ Get variables about a specific host """ if not self.hosts or len(self.hosts) == 0: # Need to load cache from cache self.load_hosts_from_cache() if host not in self.hosts: if self.args.debug: print("[%s] not found in cache." % host) # try updating the cache self.update_cache() if host not in self.hosts: if self.args.debug: print("[%s] does not exist after cache update." % host) # host might not exist anymore return self.json_format_dict({}, self.args.pretty) return self.json_format_dict(self.hosts[host], self.args.pretty) def push(self, d, k, v): """ Safely puts a new entry onto an array. 
""" if k in d: d[k].append(v) else: d[k] = [v] def load_inventory_from_cache(self): """ Reads the inventory from the cache file sets self.inventory """ cache = open(self.cache_path_inventory, 'r') json_inventory = cache.read() self.inventory = json.loads(json_inventory) def load_hosts_from_cache(self): """ Reads the cache from the cache file sets self.hosts """ cache = open(self.cache_path_hosts, 'r') json_cache = cache.read() self.hosts = json.loads(json_cache) def write_to_cache(self, data, filename): """ Writes data in JSON format to a file """ json_data = self.json_format_dict(data, True) cache = open(filename, 'w') cache.write(json_data) cache.close() def to_safe(self, word): """ Converts 'bad' characters in a string to underscores so they can be used as Ansible groups """ if self.cloudforms_clean_group_keys: regex = r"[^A-Za-z0-9\_]" return re.sub(regex, "_", word.replace(" ", "")) else: return word def json_format_dict(self, data, pretty=False): """ Converts a dict to a JSON object and dumps it as a formatted string """ if pretty: return json.dumps(data, sort_keys=True, indent=2) else: return json.dumps(data) CloudFormsInventory() ansible-2.5.1/contrib/inventory/cloudstack.ini0000644000000000000000000000024113265756155021440 0ustar rootroot00000000000000[cloudstack] #endpoint = https://api.exoscale.ch/compute endpoint = https://cloud.example.com/client/api key = cloudstack api key secret = cloudstack api secret ansible-2.5.1/contrib/inventory/cloudstack.py0000755000000000000000000002236313265756155021325 0ustar rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # # (c) 2015, René Moser # # This file is part of Ansible, # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . ###################################################################### """ Ansible CloudStack external inventory script. ============================================= Generates Ansible inventory from CloudStack. Configuration is read from 'cloudstack.ini'. If you need to pass the project, write a simple wrapper script, e.g. 
project_cloudstack.sh: #!/bin/bash cloudstack.py --project <your_project> $@ When run against a specific host, this script returns the following attributes based on the data obtained from CloudStack API: "web01": { "cpu_number": 2, "nic": [ { "ip": "10.102.76.98", "mac": "02:00:50:99:00:01", "type": "Isolated", "netmask": "255.255.255.0", "gateway": "10.102.76.1" }, { "ip": "10.102.138.63", "mac": "06:b7:5a:00:14:84", "type": "Shared", "netmask": "255.255.255.0", "gateway": "10.102.138.1" } ], "default_ip": "10.102.76.98", "zone": "ZUERICH", "created": "2014-07-02T07:53:50+0200", "hypervisor": "VMware", "memory": 2048, "state": "Running", "tags": [], "cpu_speed": 1800, "affinity_group": [], "service_offering": "Small", "cpu_used": "62%" } usage: cloudstack.py [--list] [--host HOST] [--project PROJECT] [--domain DOMAIN] """ from __future__ import print_function import sys import argparse try: import json except ImportError: import simplejson as json try: from cs import CloudStack, CloudStackException, read_config except ImportError: print("Error: CloudStack library must be installed: pip install cs.", file=sys.stderr) sys.exit(1) class CloudStackInventory(object): def __init__(self): parser = argparse.ArgumentParser() parser.add_argument('--host') parser.add_argument('--list', action='store_true') parser.add_argument('--tag', help="Filter machines by a tag. Should be in the form key=value.") parser.add_argument('--project') parser.add_argument('--domain') options = parser.parse_args() try: self.cs = CloudStack(**read_config()) except CloudStackException: print("Error: Could not connect to CloudStack API", file=sys.stderr) domain_id = None if options.domain: domain_id = self.get_domain_id(options.domain) project_id = None if options.project: project_id = self.get_project_id(options.project, domain_id) if options.host: data = self.get_host(options.host, project_id, domain_id) print(json.dumps(data, indent=2)) elif options.list: tags = dict() if options.tag: tags['tags[0].key'], tags['tags[0].value'] = options.tag.split('=') data = self.get_list(project_id, domain_id, **tags) print(json.dumps(data, indent=2)) else: print("usage: --list [--tag <key=value>] | --host <hostname> [--project <project>] [--domain <domain>]", file=sys.stderr) sys.exit(1) def get_domain_id(self, domain): domains = self.cs.listDomains(listall=True) if domains: for d in domains['domain']: if d['path'].lower() == domain.lower(): return d['id'] print("Error: Domain %s not found." % domain, file=sys.stderr) sys.exit(1) def get_project_id(self, project, domain_id=None): projects = self.cs.listProjects(domainid=domain_id) if projects: for p in projects['project']: if p['name'] == project or p['id'] == project: return p['id'] print("Error: Project %s not found."
% project, file=sys.stderr) sys.exit(1) def get_host(self, name, project_id=None, domain_id=None, **kwargs): hosts = self.cs.listVirtualMachines(projectid=project_id, domainid=domain_id, **kwargs) data = {} if not hosts: return data for host in hosts['virtualmachine']: host_name = host['displayname'] if name == host_name: data['zone'] = host['zonename'] if 'group' in host: data['group'] = host['group'] data['state'] = host['state'] data['service_offering'] = host['serviceofferingname'] data['affinity_group'] = host['affinitygroup'] data['security_group'] = host['securitygroup'] data['cpu_number'] = host['cpunumber'] data['cpu_speed'] = host['cpuspeed'] if 'cpuused' in host: data['cpu_used'] = host['cpuused'] data['memory'] = host['memory'] data['tags'] = host['tags'] data['hypervisor'] = host['hypervisor'] data['created'] = host['created'] data['nic'] = [] for nic in host['nic']: data['nic'].append({ 'ip': nic['ipaddress'], 'mac': nic['macaddress'], 'netmask': nic['netmask'], 'gateway': nic['gateway'], 'type': nic['type'], }) if nic['isdefault']: data['default_ip'] = nic['ipaddress'] break return data def get_list(self, project_id=None, domain_id=None, **kwargs): data = { 'all': { 'hosts': [], }, '_meta': { 'hostvars': {}, }, } groups = self.cs.listInstanceGroups(projectid=project_id, domainid=domain_id) if groups: for group in groups['instancegroup']: group_name = group['name'] if group_name and group_name not in data: data[group_name] = { 'hosts': [] } hosts = self.cs.listVirtualMachines(projectid=project_id, domainid=domain_id, **kwargs) if not hosts: return data for host in hosts['virtualmachine']: host_name = host['displayname'] data['all']['hosts'].append(host_name) data['_meta']['hostvars'][host_name] = {} # Make a group per zone data['_meta']['hostvars'][host_name]['zone'] = host['zonename'] group_name = host['zonename'] if group_name not in data: data[group_name] = { 'hosts': [] } data[group_name]['hosts'].append(host_name) if 'group' in host: data['_meta']['hostvars'][host_name]['group'] = host['group'] data['_meta']['hostvars'][host_name]['state'] = host['state'] data['_meta']['hostvars'][host_name]['service_offering'] = host['serviceofferingname'] data['_meta']['hostvars'][host_name]['affinity_group'] = host['affinitygroup'] data['_meta']['hostvars'][host_name]['security_group'] = host['securitygroup'] data['_meta']['hostvars'][host_name]['cpu_number'] = host['cpunumber'] data['_meta']['hostvars'][host_name]['cpu_speed'] = host['cpuspeed'] if 'cpuused' in host: data['_meta']['hostvars'][host_name]['cpu_used'] = host['cpuused'] data['_meta']['hostvars'][host_name]['created'] = host['created'] data['_meta']['hostvars'][host_name]['memory'] = host['memory'] data['_meta']['hostvars'][host_name]['tags'] = host['tags'] data['_meta']['hostvars'][host_name]['hypervisor'] = host['hypervisor'] data['_meta']['hostvars'][host_name]['created'] = host['created'] data['_meta']['hostvars'][host_name]['nic'] = [] for nic in host['nic']: data['_meta']['hostvars'][host_name]['nic'].append({ 'ip': nic['ipaddress'], 'mac': nic['macaddress'], 'netmask': nic['netmask'], 'gateway': nic['gateway'], 'type': nic['type'], }) if nic['isdefault']: data['_meta']['hostvars'][host_name]['default_ip'] = nic['ipaddress'] group_name = '' if 'group' in host: group_name = host['group'] if group_name and group_name in data: data[group_name]['hosts'].append(host_name) return data if __name__ == '__main__': CloudStackInventory() 
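# For a quick smoke test of the inventory shape this script emits, something
# like the sketch below can be kept alongside it (not called anywhere here; it
# assumes cloudstack.ini is configured, the 'cs' library is installed and the
# script is executable in the current directory -- the 'env=prod' tag is
# purely illustrative):
def _smoke_test_inventory_shape():
    import json
    import subprocess

    out = subprocess.check_output(['./cloudstack.py', '--list', '--tag', 'env=prod'])
    inventory = json.loads(out.decode('utf-8'))
    # every host listed under 'all' should also have an entry in _meta.hostvars
    assert set(inventory['all']['hosts']) <= set(inventory['_meta']['hostvars'])
    return sorted(inventory['all']['hosts'])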
ansible-2.5.1/contrib/inventory/cobbler.ini0000644000000000000000000000123713265756155020722 0ustar rootroot00000000000000# Ansible Cobbler external inventory script settings # [cobbler] host = http://PATH_TO_COBBLER_SERVER/cobbler_api # If API needs authentication add 'username' and 'password' options here. #username = foo #password = bar # API calls to Cobbler can be slow. For this reason, we cache the results of an API # call. Set this to the path you want cache files to be written to. Two files # will be written to this directory: # - ansible-cobbler.cache # - ansible-cobbler.index cache_path = /tmp # The number of seconds a cache file is considered valid. After this many # seconds, a new API call will be made, and the cache file will be updated. cache_max_age = 900 ansible-2.5.1/contrib/inventory/cobbler.py0000755000000000000000000002462513265756155020604 0ustar rootroot00000000000000#!/usr/bin/env python """ Cobbler external inventory script ================================= Ansible has a feature where instead of reading from /etc/ansible/hosts as a text file, it can query external programs to obtain the list of hosts, groups the hosts are in, and even variables to assign to each host. To use this, copy this file over /etc/ansible/hosts and chmod +x the file. This, more or less, allows you to keep one central database containing info about all of your managed instances. This script is an example of sourcing that data from Cobbler (http://cobbler.github.com). With cobbler each --mgmt-class in cobbler will correspond to a group in Ansible, and --ks-meta variables will be passed down for use in templates or even in argument lines. NOTE: The cobbler system names will not be used. Make sure a cobbler --dns-name is set for each cobbler system. If a system appears with two DNS names we do not add it twice because we don't want ansible talking to it twice. The first one found will be used. If no --dns-name is set the system will NOT be visible to ansible. We do not add cobbler system names because there is no requirement in cobbler that those correspond to addresses. See http://ansible.github.com/api.html for more info Tested with Cobbler 2.0.11. Changelog: - 2015-06-21 dmccue: Modified to support run-once _meta retrieval, results in higher performance at ansible startup. Groups are determined by owner rather than default mgmt_classes. DNS name determined from hostname. cobbler values are written to a 'cobbler' fact namespace - 2013-09-01 pgehres: Refactored implementation to make use of caching and to limit the number of connections to external cobbler server for performance. Added use of cobbler.ini file to configure settings. Tested with Cobbler 2.4.0 """ # (c) 2012, Michael DeHaan # # This file is part of Ansible, # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
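# To make the ks_meta behaviour described above concrete, here is a minimal
# sketch of the merge performed when caching a system (the host record is
# hypothetical; the real logic lives in CobblerInventory.update_cache below):
def _ks_meta_merge_example():
    from six import iteritems

    host = {'hostname': 'web01.example.com', 'ks_meta': {'rack': '42', 'role': 'web'}}
    # the cached record starts as the host itself; ks_meta entries are then
    # folded in so they surface as host variables in the Ansible inventory
    cache = {host['hostname']: dict(host)}
    for key, value in iteritems(host['ks_meta']):
        cache[host['hostname']][key] = value
    return cache['web01.example.com']['rack']  # -> '42'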
###################################################################### import argparse import ConfigParser import os import re from time import time import xmlrpclib try: import json except ImportError: import simplejson as json from six import iteritems # NOTE -- this file assumes Ansible is being accessed FROM the cobbler # server, so it does not attempt to login with a username and password. # this will be addressed in a future version of this script. orderby_keyname = 'owners' # alternatively 'mgmt_classes' class CobblerInventory(object): def __init__(self): """ Main execution path """ self.conn = None self.inventory = dict() # A list of groups and the hosts in that group self.cache = dict() # Details about hosts in the inventory # Read settings and parse CLI arguments self.read_settings() self.parse_cli_args() # Cache if self.args.refresh_cache: self.update_cache() elif not self.is_cache_valid(): self.update_cache() else: self.load_inventory_from_cache() self.load_cache_from_cache() data_to_print = "" # Data to print if self.args.host: data_to_print += self.get_host_info() else: self.inventory['_meta'] = {'hostvars': {}} for hostname in self.cache: self.inventory['_meta']['hostvars'][hostname] = {'cobbler': self.cache[hostname]} data_to_print += self.json_format_dict(self.inventory, True) print(data_to_print) def _connect(self): if not self.conn: self.conn = xmlrpclib.Server(self.cobbler_host, allow_none=True) self.token = None if self.cobbler_username is not None: self.token = self.conn.login(self.cobbler_username, self.cobbler_password) def is_cache_valid(self): """ Determines if the cache files have expired, or if it is still valid """ if os.path.isfile(self.cache_path_cache): mod_time = os.path.getmtime(self.cache_path_cache) current_time = time() if (mod_time + self.cache_max_age) > current_time: if os.path.isfile(self.cache_path_inventory): return True return False def read_settings(self): """ Reads the settings from the cobbler.ini file """ config = ConfigParser.SafeConfigParser() config.read(os.path.dirname(os.path.realpath(__file__)) + '/cobbler.ini') self.cobbler_host = config.get('cobbler', 'host') self.cobbler_username = None self.cobbler_password = None if config.has_option('cobbler', 'username'): self.cobbler_username = config.get('cobbler', 'username') if config.has_option('cobbler', 'password'): self.cobbler_password = config.get('cobbler', 'password') # Cache related cache_path = config.get('cobbler', 'cache_path') self.cache_path_cache = cache_path + "/ansible-cobbler.cache" self.cache_path_inventory = cache_path + "/ansible-cobbler.index" self.cache_max_age = config.getint('cobbler', 'cache_max_age') def parse_cli_args(self): """ Command line argument processing """ parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Cobbler') parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)') parser.add_argument('--host', action='store', help='Get all the variables about a specific instance') parser.add_argument('--refresh-cache', action='store_true', default=False, help='Force refresh of cache by making API requests to cobbler (default: False - use cache files)') self.args = parser.parse_args() def update_cache(self): """ Make calls to cobbler and save the output in a cache """ self._connect() self.groups = dict() self.hosts = dict() if self.token is not None: data = self.conn.get_systems(self.token) else: data = self.conn.get_systems() for host in data: # Get the FQDN for the host and add it 
to the right groups dns_name = host['hostname'] # None ksmeta = None interfaces = host['interfaces'] # hostname is often empty for non-static IP hosts if dns_name == '': for (iname, ivalue) in iteritems(interfaces): if ivalue['management'] or not ivalue['static']: this_dns_name = ivalue.get('dns_name', None) if this_dns_name is not None and this_dns_name != "": dns_name = this_dns_name if dns_name == '' or dns_name is None: continue status = host['status'] profile = host['profile'] classes = host[orderby_keyname] if status not in self.inventory: self.inventory[status] = [] self.inventory[status].append(dns_name) if profile not in self.inventory: self.inventory[profile] = [] self.inventory[profile].append(dns_name) for cls in classes: if cls not in self.inventory: self.inventory[cls] = [] self.inventory[cls].append(dns_name) # Since we already have all of the data for the host, update the host details as well # The old way was ksmeta only -- provide backwards compatibility self.cache[dns_name] = host if "ks_meta" in host: for key, value in iteritems(host["ks_meta"]): self.cache[dns_name][key] = value self.write_to_cache(self.cache, self.cache_path_cache) self.write_to_cache(self.inventory, self.cache_path_inventory) def get_host_info(self): """ Get variables about a specific host """ if not self.cache or len(self.cache) == 0: # Need to load index from cache self.load_cache_from_cache() if self.args.host not in self.cache: # try updating the cache self.update_cache() if self.args.host not in self.cache: # host might not exist anymore return self.json_format_dict({}, True) return self.json_format_dict(self.cache[self.args.host], True) def push(self, my_dict, key, element): """ Pushes an element onto an array that may not have been defined in the dict """ if key in my_dict: my_dict[key].append(element) else: my_dict[key] = [element] def load_inventory_from_cache(self): """ Reads the index from the cache file and sets self.index """ cache = open(self.cache_path_inventory, 'r') json_inventory = cache.read() self.inventory = json.loads(json_inventory) def load_cache_from_cache(self): """ Reads the cache from the cache file and sets self.cache """ cache = open(self.cache_path_cache, 'r') json_cache = cache.read() self.cache = json.loads(json_cache) def write_to_cache(self, data, filename): """ Writes data in JSON format to a file """ json_data = self.json_format_dict(data, True) cache = open(filename, 'w') cache.write(json_data) cache.close() def to_safe(self, word): """ Converts 'bad' characters in a string to underscores so they can be used as Ansible groups """ return re.sub(r"[^A-Za-z0-9\-]", "_", word) def json_format_dict(self, data, pretty=False): """ Converts a dict to a JSON object and dumps it as a formatted string """ if pretty: return json.dumps(data, sort_keys=True, indent=2) else: return json.dumps(data) CobblerInventory() ansible-2.5.1/contrib/inventory/collins.ini0000644000000000000000000000401213265756155020747 0ustar rootroot00000000000000# Ansible Collins external inventory script settings # [collins] # You should not have a trailing slash or collins # will not properly match the URI host = http://localhost:9000 username = blake password = admin:first # Specifies a timeout for all HTTP requests to Collins. timeout_secs = 120 # Specifies a maximum number of retries per Collins request.
max_retries = 5 # Specifies the number of results to return per paginated query as specified in # the Pagination section of the Collins API docs: # http://tumblr.github.io/collins/api.html results_per_query = 100 # Specifies the Collins asset type which will be queried for; most typically # you'll want to leave this at the default of SERVER_NODE. asset_type = SERVER_NODE # Collins assets can optionally be assigned hostnames; this option will preference # the selection of an asset's hostname over an IP address as the primary identifier # in the Ansible inventory. Typically, this value should be set to true if assets # are assigned hostnames. prefer_hostnames = true # Within Collins, assets can be granted multiple IP addresses; this configuration # value specifies the index within the 'ADDRESSES' array as returned by the # following API endpoint: # http://tumblr.github.io/collins/api.html#api-ipam-asset-addresses-section ip_address_index = 0 # Sets whether Collins instances in multiple datacenters will be queried. query_remote_dcs = false # API calls to Collins can involve large, substantial queries. For this reason, # we cache the results of an API call. Set this to the path you want cache files # to be written to. Two files will be written to this directory: # - ansible-collins.cache # - ansible-collins.index cache_path = /tmp # If errors occur while querying inventory, logging messages will be written # to a logfile in the specified directory: # - ansible-collins.log log_path = /tmp # The number of seconds that a cache file is considered valid. After this many # seconds, a new API call will be made, and the cache file will be updated. cache_max_age = 600 ansible-2.5.1/contrib/inventory/collins.py0000755000000000000000000004315713265756155020640 0ustar rootroot00000000000000#!/usr/bin/env python """ Collins external inventory script ================================= Ansible has a feature where instead of reading from /etc/ansible/hosts as a text file, it can query external programs to obtain the list of hosts, groups the hosts are in, and even variables to assign to each host. Collins is a hardware asset management system originally developed by Tumblr for tracking new hardware as it built out its own datacenters. It exposes a rich API for manipulating and querying one's hardware inventory, which makes it an ideal 'single point of truth' for driving systems automation like Ansible. Extensive documentation on Collins, including a quickstart, API docs, and a full reference manual, can be found here: http://tumblr.github.io/collins This script adds support to Ansible for obtaining a dynamic inventory of assets in your infrastructure, grouping them in Ansible by their useful attributes, and binding all facts provided by Collins to each host so that they can be used to drive automation. Some parts of this script were cribbed shamelessly from mdehaan's Cobbler inventory script. To use it, copy it to your repo and pass -i to the ansible or ansible-playbook command; if you'd like to use it by default, simply copy collins.ini to /etc/ansible and this script to /etc/ansible/hosts. 
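As a rough illustration of how this script derives Ansible group names: each
asset attribute name/value pair is joined and then sanitized so that only
letters, digits and dashes survive. A minimal sketch of that mapping (the
NODECLASS attribute and its value are hypothetical; the regex matches the
to_safe() helper further down in this file):

    import re

    def to_safe(word):
        # anything outside [A-Za-z0-9-] becomes an underscore
        return re.sub(r"[^A-Za-z0-9\-]", "_", word)

    # an attribute pair NODECLASS / 'web server' yields the group
    # 'NODECLASS-web_server'
    print(to_safe('%s-%s' % ('NODECLASS', 'web server')))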
Alongside the options set in collins.ini, there are several environment variables that will be used instead of the configured values if they are set: - COLLINS_USERNAME - specifies a username to use for Collins authentication - COLLINS_PASSWORD - specifies a password to use for Collins authentication - COLLINS_ASSET_TYPE - specifies a Collins asset type to use during querying; this can be used to run Ansible automation against different asset classes than server nodes, such as network switches and PDUs - COLLINS_CONFIG - specifies an alternative location for collins.ini, defaults to /collins.ini If errors are encountered during operation, this script will return an exit code of 255; otherwise, it will return an exit code of 0. Collins attributes are accessible as variables in ansible via the COLLINS['attribute_name']. Tested against Ansible 1.8.2 and Collins 1.3.0. """ # (c) 2014, Steve Salevan # # This file is part of Ansible. # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . ###################################################################### import argparse import ConfigParser import logging import os import re import sys from time import time import traceback try: import json except ImportError: import simplejson as json from six import iteritems from six.moves.urllib.parse import urlencode from ansible.module_utils.urls import open_url class CollinsDefaults(object): ASSETS_API_ENDPOINT = '%s/api/assets' SPECIAL_ATTRIBUTES = set([ 'CREATED', 'DELETED', 'UPDATED', 'STATE', ]) LOG_FORMAT = '%(asctime)-15s %(message)s' class Error(Exception): pass class MaxRetriesError(Error): pass class CollinsInventory(object): def __init__(self): """ Constructs CollinsInventory object and reads all configuration. """ self.inventory = dict() # A list of groups and the hosts in that group self.cache = dict() # Details about hosts in the inventory # Read settings and parse CLI arguments self.read_settings() self.parse_cli_args() logging.basicConfig(format=CollinsDefaults.LOG_FORMAT, filename=self.log_location) self.log = logging.getLogger('CollinsInventory') def _asset_get_attribute(self, asset, attrib): """ Returns a user-defined attribute from an asset if it exists; otherwise, returns None. """ if 'ATTRIBS' in asset: for attrib_block in asset['ATTRIBS'].keys(): if attrib in asset['ATTRIBS'][attrib_block]: return asset['ATTRIBS'][attrib_block][attrib] return None def _asset_has_attribute(self, asset, attrib): """ Returns whether a user-defined attribute is present on an asset. """ if 'ATTRIBS' in asset: for attrib_block in asset['ATTRIBS'].keys(): if attrib in asset['ATTRIBS'][attrib_block]: return True return False def run(self): """ Main execution path """ # Updates cache if cache is not present or has expired. 
successful = True if self.args.refresh_cache: successful = self.update_cache() elif not self.is_cache_valid(): successful = self.update_cache() else: successful = self.load_inventory_from_cache() successful &= self.load_cache_from_cache() data_to_print = "" # Data to print if self.args.host: data_to_print = self.get_host_info() elif self.args.list: # Display list of instances for inventory data_to_print = self.json_format_dict(self.inventory, self.args.pretty) else: # default action with no options data_to_print = self.json_format_dict(self.inventory, self.args.pretty) print(data_to_print) return successful def find_assets(self, attributes=None, operation='AND'): """ Obtains Collins assets matching the provided attributes. """ attributes = {} if attributes is None else attributes # Formats asset search query to locate assets matching attributes, using # the CQL search feature as described here: # http://tumblr.github.io/collins/recipes.html attributes_query = ['='.join(attr_pair) for attr_pair in iteritems(attributes)] query_parameters = { 'details': ['True'], 'operation': [operation], 'query': attributes_query, 'remoteLookup': [str(self.query_remote_dcs)], 'size': [self.results_per_query], 'type': [self.collins_asset_type], } assets = [] cur_page = 0 num_retries = 0 # Locates all assets matching the provided query, exhausting pagination. while True: if num_retries == self.collins_max_retries: raise MaxRetriesError("Maximum of %s retries reached; giving up" % self.collins_max_retries) query_parameters['page'] = cur_page query_url = "%s?%s" % ( (CollinsDefaults.ASSETS_API_ENDPOINT % self.collins_host), urlencode(query_parameters, doseq=True) ) try: response = open_url(query_url, timeout=self.collins_timeout_secs, url_username=self.collins_username, url_password=self.collins_password, force_basic_auth=True) json_response = json.loads(response.read()) # Adds any assets found to the array of assets. assets += json_response['data']['Data'] # If we've retrieved all of our assets, breaks out of the loop. 
if len(json_response['data']['Data']) == 0: break cur_page += 1 num_retries = 0 except: self.log.error("Error while communicating with Collins, retrying:\n%s", traceback.format_exc()) num_retries += 1 return assets def is_cache_valid(self): """ Determines if the cache files have expired, or if they are still valid """ if os.path.isfile(self.cache_path_cache): mod_time = os.path.getmtime(self.cache_path_cache) current_time = time() if (mod_time + self.cache_max_age) > current_time: if os.path.isfile(self.cache_path_inventory): return True return False def read_settings(self): """ Reads the settings from the collins.ini file """ config_loc = os.getenv('COLLINS_CONFIG', os.path.dirname(os.path.realpath(__file__)) + '/collins.ini') config = ConfigParser.SafeConfigParser() config.read(config_loc) self.collins_host = config.get('collins', 'host') self.collins_username = os.getenv('COLLINS_USERNAME', config.get('collins', 'username')) self.collins_password = os.getenv('COLLINS_PASSWORD', config.get('collins', 'password')) self.collins_asset_type = os.getenv('COLLINS_ASSET_TYPE', config.get('collins', 'asset_type')) self.collins_timeout_secs = config.getint('collins', 'timeout_secs') self.collins_max_retries = config.getint('collins', 'max_retries') self.results_per_query = config.getint('collins', 'results_per_query') self.ip_address_index = config.getint('collins', 'ip_address_index') self.query_remote_dcs = config.getboolean('collins', 'query_remote_dcs') self.prefer_hostnames = config.getboolean('collins', 'prefer_hostnames') cache_path = config.get('collins', 'cache_path') self.cache_path_cache = cache_path + \ '/ansible-collins-%s.cache' % self.collins_asset_type self.cache_path_inventory = cache_path + \ '/ansible-collins-%s.index' % self.collins_asset_type self.cache_max_age = config.getint('collins', 'cache_max_age') log_path = config.get('collins', 'log_path') self.log_location = log_path + '/ansible-collins.log' def parse_cli_args(self): """ Command line argument processing """ parser = argparse.ArgumentParser( description='Produces an Ansible Inventory file based on Collins') parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)') parser.add_argument('--host', action='store', help='Get all the variables about a specific instance') parser.add_argument('--refresh-cache', action='store_true', default=False, help='Force refresh of cache by making API requests to Collins ' '(default: False - use cache files)') parser.add_argument('--pretty', action='store_true', default=False, help='Pretty print all JSON output') self.args = parser.parse_args() def update_cache(self): """ Makes calls to Collins and saves the output in a cache """ self.cache = dict() self.inventory = dict() # Locates all server assets from Collins. try: server_assets = self.find_assets() except: self.log.error("Error while locating assets from Collins:\n%s", traceback.format_exc()) return False for asset in server_assets: # Determines the index to retrieve the asset's IP address either by an # attribute set on the Collins asset or the pre-configured value.
if self._asset_has_attribute(asset, 'ANSIBLE_IP_INDEX'): ip_index = self._asset_get_attribute(asset, 'ANSIBLE_IP_INDEX') try: ip_index = int(ip_index) except: self.log.error( "ANSIBLE_IP_INDEX attribute on asset %s not an integer: %s", asset, ip_index) else: ip_index = self.ip_address_index asset['COLLINS'] = {} # Attempts to locate the asset's primary identifier (hostname or IP address), # which will be used to index the asset throughout the Ansible inventory. if self.prefer_hostnames and self._asset_has_attribute(asset, 'HOSTNAME'): asset_identifier = self._asset_get_attribute(asset, 'HOSTNAME') elif 'ADDRESSES' not in asset: self.log.warning("No IP addresses found for asset '%s', skipping", asset) continue elif len(asset['ADDRESSES']) < ip_index + 1: self.log.warning( "No IP address found at index %s for asset '%s', skipping", ip_index, asset) continue else: asset_identifier = asset['ADDRESSES'][ip_index]['ADDRESS'] # Adds an asset index to the Ansible inventory based upon unpacking # the name of the asset's current STATE from its dictionary. if 'STATE' in asset['ASSET'] and asset['ASSET']['STATE']: state_inventory_key = self.to_safe( 'STATE-%s' % asset['ASSET']['STATE']['NAME']) self.push(self.inventory, state_inventory_key, asset_identifier) # Indexes asset by all user-defined Collins attributes. if 'ATTRIBS' in asset: for attrib_block in asset['ATTRIBS'].keys(): for attrib in asset['ATTRIBS'][attrib_block].keys(): asset['COLLINS'][attrib] = asset['ATTRIBS'][attrib_block][attrib] attrib_key = self.to_safe('%s-%s' % (attrib, asset['ATTRIBS'][attrib_block][attrib])) self.push(self.inventory, attrib_key, asset_identifier) # Indexes asset by all built-in Collins attributes. for attribute in asset['ASSET'].keys(): if attribute not in CollinsDefaults.SPECIAL_ATTRIBUTES: attribute_val = asset['ASSET'][attribute] if attribute_val is not None: attrib_key = self.to_safe('%s-%s' % (attribute, attribute_val)) self.push(self.inventory, attrib_key, asset_identifier) # Indexes asset by hardware product information. if 'HARDWARE' in asset: if 'PRODUCT' in asset['HARDWARE']['BASE']: product = asset['HARDWARE']['BASE']['PRODUCT'] if product: product_key = self.to_safe( 'HARDWARE-PRODUCT-%s' % asset['HARDWARE']['BASE']['PRODUCT']) self.push(self.inventory, product_key, asset_identifier) # Indexing now complete, adds the host details to the asset cache. self.cache[asset_identifier] = asset try: self.write_to_cache(self.cache, self.cache_path_cache) self.write_to_cache(self.inventory, self.cache_path_inventory) except: self.log.error("Error while writing to cache:\n%s", traceback.format_exc()) return False return True def push(self, dictionary, key, value): """ Adds a value to a list at a dictionary key, creating the list if it doesn't exist. """ if key not in dictionary: dictionary[key] = [] dictionary[key].append(value) def get_host_info(self): """ Get variables about a specific host. 
""" if not self.cache or len(self.cache) == 0: # Need to load index from cache self.load_cache_from_cache() if self.args.host not in self.cache: # try updating the cache self.update_cache() if self.args.host not in self.cache: # host might not exist anymore return self.json_format_dict({}, self.args.pretty) return self.json_format_dict(self.cache[self.args.host], self.args.pretty) def load_inventory_from_cache(self): """ Reads the index from the cache file sets self.index """ try: cache = open(self.cache_path_inventory, 'r') json_inventory = cache.read() self.inventory = json.loads(json_inventory) return True except: self.log.error("Error while loading inventory:\n%s", traceback.format_exc()) self.inventory = {} return False def load_cache_from_cache(self): """ Reads the cache from the cache file sets self.cache """ try: cache = open(self.cache_path_cache, 'r') json_cache = cache.read() self.cache = json.loads(json_cache) return True except: self.log.error("Error while loading host cache:\n%s", traceback.format_exc()) self.cache = {} return False def write_to_cache(self, data, filename): """ Writes data in JSON format to a specified file. """ json_data = self.json_format_dict(data, self.args.pretty) cache = open(filename, 'w') cache.write(json_data) cache.close() def to_safe(self, word): """ Converts 'bad' characters in a string to underscores so they can be used as Ansible groups """ return re.sub(r"[^A-Za-z0-9\-]", "_", word) def json_format_dict(self, data, pretty=False): """ Converts a dict to a JSON object and dumps it as a formatted string """ if pretty: return json.dumps(data, sort_keys=True, indent=2) else: return json.dumps(data) if __name__ in '__main__': inventory = CollinsInventory() if inventory.run(): sys.exit(0) else: sys.exit(-1) ansible-2.5.1/contrib/inventory/consul_io.ini0000644000000000000000000000345013265756155021303 0ustar rootroot00000000000000# Ansible Consul external inventory script settings. [consul] # # Bulk load. Load all possible data before building inventory JSON # If true, script processes in-memory data. JSON generation reduces drastically # bulk_load = false # restrict included nodes to those from this datacenter #datacenter = nyc1 # url of the consul cluster to query #url = http://demo.consul.io url = http://localhost:8500 # suffix added to each service to create a group name e.g Service of 'redis' and # a suffix of '_servers' will add each address to the group name 'redis_servers' servers_suffix = _servers # # By default, final JSON is built based on all available info in consul. # Suffixes means that services groups will be added in addition to basic infromation. See servers_suffix for additional info # There are cases when speed is preferable than having services groups # False value will reduce script execution time dragtically. # suffixes = true # if specified then the inventory will generate domain names that will resolve # via Consul's inbuilt DNS. #domain=consul # make groups from service tags. the name of the group is derived from the # service name and the tag name e.g. a service named nginx with tags ['master', 'v1'] # will create groups nginx_master and nginx_v1 tags = true # looks up the node name at the given path for a list of groups to which the # node should be added. 
kv_groups=ansible/groups # looks up the node name at the given path for a json dictionary of metadata that # should be attached as metadata for the node kv_metadata=ansible/metadata # looks up the health of each service and adds the node to 'up' and 'down' groups # based on the service availibility # # !!!! if availability is true, suffixes also must be true. !!!! # availability = true available_suffix = _up unavailable_suffix = _down ansible-2.5.1/contrib/inventory/consul_io.py0000755000000000000000000004774413265756155021175 0ustar rootroot00000000000000#!/usr/bin/env python # # (c) 2015, Steve Gargan # # This file is part of Ansible, # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . ###################################################################### ''' Consul.io inventory script (http://consul.io) ====================================== Generates Ansible inventory from nodes in a Consul cluster. This script will group nodes by: - datacenter, - registered service - service tags - service status - values from the k/v store This script can be run with the switches --list as expected groups all the nodes in all datacenters --datacenter, to restrict the nodes to a single datacenter --host to restrict the inventory to a single named node. (requires datacenter config) The configuration for this plugin is read from a consul_io.ini file located in the same directory as this inventory script. All config options in the config file are optional except the host and port, which must point to a valid agent or server running the http api. For more information on enabling the endpoint see. http://www.consul.io/docs/agent/options.html Other options include: 'datacenter': which restricts the included nodes to those from the given datacenter 'url': the URL of the Consul cluster. host, port and scheme are derived from the URL. If not specified, connection configuration defaults to http requests to localhost on port 8500. 'domain': if specified then the inventory will generate domain names that will resolve via Consul's inbuilt DNS. The name is derived from the node name, datacenter and domain .node... Note that you will need to have consul hooked into your DNS server for these to resolve. See the consul DNS docs for more info. which restricts the included nodes to those from the given datacenter 'servers_suffix': defining the a suffix to add to the service name when creating the service group. e.g Service name of 'redis' and a suffix of '_servers' will add each nodes address to the group name 'redis_servers'. No suffix is added if this is not set 'tags': boolean flag defining if service tags should be used to create Inventory groups e.g. an nginx service with the tags ['master', 'v1'] will create groups nginx_master and nginx_v1 to which the node running the service will be added. No tag groups are created if this is missing. 'token': ACL token to use to authorize access to the key value store. 
May be required to retrieve the kv_groups and kv_metadata based on your consul configuration. 'kv_groups': This is used to look up groups for a node in the key value store. It specifies a path to which each discovered node's name will be added to create a key to query the key/value store. There it expects to find a comma separated list of group names to which the node should be added e.g. if the inventory contains node 'nyc-web-1' in datacenter 'nyc-dc1' and kv_groups = 'ansible/groups' then the key 'ansible/groups/nyc-dc1/nyc-web-1' will be queried for a group list. If this query returned 'test,honeypot' then the node address would be added to both groups. 'kv_metadata': kv_metadata is used to look up metadata for each discovered node. Like kv_groups above it is used to build a path to look up in the kv store where it expects to find a json dictionary of metadata entries. If found, each key/value pair in the dictionary is added to the metadata for the node. e.g. node 'nyc-web-1' in datacenter 'nyc-dc1' and kv_metadata = 'ansible/metadata', then the key 'ansible/metadata/nyc-dc1/nyc-web-1' should contain '{"database": "postgres"}' 'availability': if true then availability groups will be created for each service. The node will be added to one of the groups based on the health status of the service. The group name is derived from the service name and the configurable availability suffixes 'available_suffix': suffix that should be appended to the service availability groups for available services e.g. if the suffix is '_up' and the service is nginx, then nodes with healthy nginx services will be added to the nginx_up group. Defaults to '_available' 'unavailable_suffix': as above but for unhealthy services, defaults to '_unavailable' Note that if the inventory discovers an 'ssh' service running on a node it will register the port as ansible_ssh_port in the node's metadata and this port will be used to access the machine. ''' import os import re import argparse import sys try: import configparser except ImportError: import ConfigParser as configparser def get_log_filename(): tty_filename = '/dev/tty' stdout_filename = '/dev/stdout' if not os.path.exists(tty_filename): return stdout_filename if not os.access(tty_filename, os.W_OK): return stdout_filename if os.getenv('TEAMCITY_VERSION'): return stdout_filename return tty_filename def setup_logging(): filename = get_log_filename() import logging.config logging.config.dictConfig({ 'version': 1, 'formatters': { 'simple': { 'format': '%(asctime)s - %(name)s - %(levelname)s - %(message)s', }, }, 'root': { 'level': os.getenv('ANSIBLE_INVENTORY_CONSUL_IO_LOG_LEVEL', 'WARN'), 'handlers': ['console'], }, 'handlers': { 'console': { 'class': 'logging.FileHandler', 'filename': filename, 'formatter': 'simple', }, }, 'loggers': { 'iso8601': { 'qualname': 'iso8601', 'level': 'INFO', }, }, }) logger = logging.getLogger('consul_io.py') logger.debug('Invoked with %r', sys.argv) if os.getenv('ANSIBLE_INVENTORY_CONSUL_IO_LOG_ENABLED'): setup_logging() try: import json except ImportError: import simplejson as json try: import consul except ImportError as e: sys.exit("""failed=True msg='python-consul required for this module.
See http://python-consul.readthedocs.org/en/latest/#installation'""") from six import iteritems class ConsulInventory(object): def __init__(self): ''' Create an inventory based on the catalog of nodes and services registered in a consul cluster''' self.node_metadata = {} self.nodes = {} self.nodes_by_service = {} self.nodes_by_tag = {} self.nodes_by_datacenter = {} self.nodes_by_kv = {} self.nodes_by_availability = {} self.current_dc = None self.inmemory_kv = [] self.inmemory_nodes = [] config = ConsulConfig() self.config = config self.consul_api = config.get_consul_api() if config.has_config('datacenter'): if config.has_config('host'): self.load_data_for_node(config.host, config.datacenter) else: self.load_data_for_datacenter(config.datacenter) else: self.load_all_data_consul() self.combine_all_results() print(json.dumps(self.inventory, sort_keys=True, indent=2)) def bulk_load(self, datacenter): index, groups_list = self.consul_api.kv.get(self.config.kv_groups, recurse=True, dc=datacenter) index, metadata_list = self.consul_api.kv.get(self.config.kv_metadata, recurse=True, dc=datacenter) index, nodes = self.consul_api.catalog.nodes(dc=datacenter) self.inmemory_kv += groups_list self.inmemory_kv += metadata_list self.inmemory_nodes += nodes def load_all_data_consul(self): ''' cycle through each of the datacenters in the consul catalog and process the nodes in each ''' self.datacenters = self.consul_api.catalog.datacenters() for datacenter in self.datacenters: self.current_dc = datacenter self.bulk_load(datacenter) self.load_data_for_datacenter(datacenter) def load_availability_groups(self, node, datacenter): '''check the health of each service on a node and add the node to either an 'available' or 'unavailable' grouping. The suffix for each group can be controlled from the config''' if self.config.has_config('availability'): for service_name, service in iteritems(node['Services']): for node in self.consul_api.health.service(service_name)[1]: if self.is_service_available(node, service_name): suffix = self.config.get_availability_suffix( 'available_suffix', '_available') else: suffix = self.config.get_availability_suffix( 'unavailable_suffix', '_unavailable') self.add_node_to_map(self.nodes_by_availability, service_name + suffix, node['Node']) def is_service_available(self, node, service_name): '''check the availability of the service on the node besides ensuring the availability of the node itself''' consul_ok = service_ok = False for check in node['Checks']: if check['CheckID'] == 'serfHealth': consul_ok = check['Status'] == 'passing' elif check['ServiceName'] == service_name: service_ok = check['Status'] == 'passing' return consul_ok and service_ok def consul_get_kv_inmemory(self, key): # wrap filter() in list() so the .pop() below also works on Python 3, # where filter() returns a lazy iterator result = list(filter(lambda x: x['Key'] == key, self.inmemory_kv)) return result.pop() if result else None def consul_get_node_inmemory(self, node): result = list(filter(lambda x: x['Node'] == node, self.inmemory_nodes)) return {"Node": result.pop(), "Services": {}} if result else None def load_data_for_datacenter(self, datacenter): '''processes all the nodes in a particular datacenter''' if self.config.bulk_load == 'true': nodes = self.inmemory_nodes else: index, nodes = self.consul_api.catalog.nodes(dc=datacenter) for node in nodes: self.add_node_to_map(self.nodes_by_datacenter, datacenter, node) self.load_data_for_node(node['Node'], datacenter) def load_data_for_node(self, node, datacenter): '''loads the data for a single node adding it to various groups based on metadata retrieved from the kv store and service
availability''' if self.config.suffixes == 'true': index, node_data = self.consul_api.catalog.node(node, dc=datacenter) else: node_data = self.consul_get_node_inmemory(node) node = node_data['Node'] self.add_node_to_map(self.nodes, 'all', node) self.add_metadata(node_data, "consul_datacenter", datacenter) self.add_metadata(node_data, "consul_nodename", node['Node']) self.load_groups_from_kv(node_data) self.load_node_metadata_from_kv(node_data) if self.config.suffixes == 'true': self.load_availability_groups(node_data, datacenter) for name, service in node_data['Services'].items(): self.load_data_from_service(name, service, node_data) def load_node_metadata_from_kv(self, node_data): ''' load the json dict at the metadata path defined by the kv_metadata value and the node name add each entry in the dictionary to the node's metadata ''' node = node_data['Node'] if self.config.has_config('kv_metadata'): key = "%s/%s/%s" % (self.config.kv_metadata, self.current_dc, node['Node']) if self.config.bulk_load == 'true': metadata = self.consul_get_kv_inmemory(key) else: index, metadata = self.consul_api.kv.get(key) if metadata and metadata['Value']: try: metadata = json.loads(metadata['Value']) for k, v in metadata.items(): self.add_metadata(node_data, k, v) except: pass def load_groups_from_kv(self, node_data): ''' load the comma separated list of groups at the path defined by the kv_groups config value and the node name add the node address to each group found ''' node = node_data['Node'] if self.config.has_config('kv_groups'): key = "%s/%s/%s" % (self.config.kv_groups, self.current_dc, node['Node']) if self.config.bulk_load == 'true': groups = self.consul_get_kv_inmemory(key) else: index, groups = self.consul_api.kv.get(key) if groups and groups['Value']: for group in groups['Value'].split(','): self.add_node_to_map(self.nodes_by_kv, group.strip(), node) def load_data_from_service(self, service_name, service, node_data): '''process a service registered on a node, adding the node to a group with the service name. Each service tag is extracted and the node is added to a tag grouping also''' self.add_metadata(node_data, "consul_services", service_name, True) if self.is_service("ssh", service_name): self.add_metadata(node_data, "ansible_ssh_port", service['Port']) if self.config.has_config('servers_suffix'): service_name = service_name + self.config.servers_suffix self.add_node_to_map(self.nodes_by_service, service_name, node_data['Node']) self.extract_groups_from_tags(service_name, service, node_data) def is_service(self, target, name): return name and (name.lower() == target.lower()) def extract_groups_from_tags(self, service_name, service, node_data): '''iterates each service tag and adds the node to groups derived from the service and tag names e.g. 
nginx_master''' if self.config.has_config('tags') and service['Tags']: tags = service['Tags'] self.add_metadata(node_data, "consul_%s_tags" % service_name, tags) for tag in service['Tags']: tagname = service_name + '_' + tag self.add_node_to_map(self.nodes_by_tag, tagname, node_data['Node']) def combine_all_results(self): '''prunes and sorts all groupings for combination into the final map''' self.inventory = {"_meta": {"hostvars": self.node_metadata}} groupings = [self.nodes, self.nodes_by_datacenter, self.nodes_by_service, self.nodes_by_tag, self.nodes_by_kv, self.nodes_by_availability] for grouping in groupings: for name, addresses in grouping.items(): self.inventory[name] = sorted(list(set(addresses))) def add_metadata(self, node_data, key, value, is_list=False): ''' Pushed an element onto a metadata dict for the node, creating the dict if it doesn't exist ''' key = self.to_safe(key) node = self.get_inventory_name(node_data['Node']) if node in self.node_metadata: metadata = self.node_metadata[node] else: metadata = {} self.node_metadata[node] = metadata if is_list: self.push(metadata, key, value) else: metadata[key] = value def get_inventory_name(self, node_data): '''return the ip or a node name that can be looked up in consul's dns''' domain = self.config.domain if domain: node_name = node_data['Node'] if self.current_dc: return '%s.node.%s.%s' % (node_name, self.current_dc, domain) else: return '%s.node.%s' % (node_name, domain) else: return node_data['Address'] def add_node_to_map(self, map, name, node): self.push(map, name, self.get_inventory_name(node)) def push(self, my_dict, key, element): ''' Pushed an element onto an array that may not have been defined in the dict ''' key = self.to_safe(key) if key in my_dict: my_dict[key].append(element) else: my_dict[key] = [element] def to_safe(self, word): ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups ''' return re.sub(r'[^A-Za-z0-9\-\.]', '_', word) def sanitize_dict(self, d): new_dict = {} for k, v in d.items(): if v is not None: new_dict[self.to_safe(str(k))] = self.to_safe(str(v)) return new_dict def sanitize_list(self, seq): new_seq = [] for d in seq: new_seq.append(self.sanitize_dict(d)) return new_seq class ConsulConfig(dict): def __init__(self): self.read_settings() self.read_cli_args() def has_config(self, name): if hasattr(self, name): return getattr(self, name) else: return False def read_settings(self): ''' Reads the settings from the consul_io.ini file (or consul.ini for backwards compatibility)''' config = configparser.SafeConfigParser() if os.path.isfile(os.path.dirname(os.path.realpath(__file__)) + '/consul_io.ini'): config.read(os.path.dirname(os.path.realpath(__file__)) + '/consul_io.ini') else: config.read(os.path.dirname(os.path.realpath(__file__)) + '/consul.ini') config_options = ['host', 'token', 'datacenter', 'servers_suffix', 'tags', 'kv_metadata', 'kv_groups', 'availability', 'unavailable_suffix', 'available_suffix', 'url', 'domain', 'suffixes', 'bulk_load'] for option in config_options: value = None if config.has_option('consul', option): value = config.get('consul', option).lower() setattr(self, option, value) def read_cli_args(self): ''' Command line argument processing ''' parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based nodes in a Consul cluster') parser.add_argument('--list', action='store_true', help='Get all inventory variables from all nodes in the consul cluster') parser.add_argument('--host', action='store', help='Get 
all inventory variables about a specific consul node, ' 'requires datacenter set in consul.ini.') parser.add_argument('--datacenter', action='store', help='Get all inventory about a specific consul datacenter') args = parser.parse_args() arg_names = ['host', 'datacenter'] for arg in arg_names: if getattr(args, arg): setattr(self, arg, getattr(args, arg)) def get_availability_suffix(self, suffix, default): if self.has_config(suffix): return self.has_config(suffix) return default def get_consul_api(self): '''get an instance of the api based on the supplied configuration''' host = 'localhost' port = 8500 token = None scheme = 'http' if hasattr(self, 'url'): from ansible.module_utils.six.moves.urllib.parse import urlparse o = urlparse(self.url) if o.hostname: host = o.hostname if o.port: port = o.port if o.scheme: scheme = o.scheme if hasattr(self, 'token'): token = self.token if not token: token = 'anonymous' return consul.Consul(host=host, port=port, token=token, scheme=scheme) ConsulInventory() ansible-2.5.1/contrib/inventory/digital_ocean.ini0000644000000000000000000000164513265756155022077 0ustar rootroot00000000000000# Ansible DigitalOcean external inventory script settings # [digital_ocean] # The module needs your DigitalOcean API Token. # It may also be specified on the command line via --api-token # or via the environment variables DO_API_TOKEN or DO_API_KEY # #api_token = 123456abcdefg # API calls to DigitalOcean may be slow. For this reason, we cache the results # of an API call. Set this to the path you want cache files to be written to. # One file will be written to this directory: # - ansible-digital_ocean.cache # cache_path = /tmp # The number of seconds a cache file is considered valid. After this many # seconds, a new API call will be made, and the cache file will be updated. # cache_max_age = 300 # Use the private network IP address instead of the public when available. # use_private_network = False # Pass variables to every group, e.g.: # # group_variables = { 'ansible_user': 'root' } # group_variables = {} ansible-2.5.1/contrib/inventory/digital_ocean.py0000755000000000000000000005114313265756155021751 0ustar rootroot00000000000000#!/usr/bin/env python """ DigitalOcean external inventory script ====================================== Generates Ansible inventory of DigitalOcean Droplets. In addition to the --list and --host options used by Ansible, there are options for generating JSON of other DigitalOcean data. This is useful when creating droplets. For example, --regions will return all the DigitalOcean Regions. This information can also be easily found in the cache file, whose default location is /tmp/ansible-digital_ocean.cache. The --pretty (-p) option pretty-prints the output for better human readability. ---- Although the cache stores all the information received from DigitalOcean, the cache is not used for current droplet information (in --list, --host, --all, and --droplets). This is so that accurate droplet information is always found. You can force this script to use the cache with --force-cache. ---- Configuration is read from `digital_ocean.ini`, then from environment variables, and then from command-line arguments. Most notably, the DigitalOcean API Token must be specified. It can be specified in the INI file or with the following environment variables: export DO_API_TOKEN='abc123' or export DO_API_KEY='abc123' Alternatively, it can be passed on the command-line with --api-token. 
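For example, a typical session might look like this (the token is the same placeholder used above, and the ad-hoc ping assumes the droplets are reachable over SSH):

    export DO_API_TOKEN='abc123'
    digital_ocean.py --list --pretty
    ansible -i digital_ocean.py all -m ping
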
If you specify DigitalOcean credentials in the INI file, a handy way to get them into your environment (e.g., to use the digital_ocean module) is to use the output of the --env option with export: export $(digital_ocean.py --env) ---- The following groups are generated from --list: - ID (droplet ID) - NAME (droplet NAME) - image_ID - image_NAME - distro_NAME (distribution NAME from image) - region_NAME - size_NAME - status_STATUS For each host, the following variables are registered: - do_backup_ids - do_created_at - do_disk - do_features - list - do_id - do_image - object - do_ip_address - do_private_ip_address - do_kernel - object - do_locked - do_memory - do_name - do_networks - object - do_next_backup_window - do_region - object - do_size - object - do_size_slug - do_snapshot_ids - list - do_status - do_tags - do_vcpus - do_volume_ids ----- ``` usage: digital_ocean.py [-h] [--list] [--host HOST] [--all] [--droplets] [--regions] [--images] [--sizes] [--ssh-keys] [--domains] [--tags] [--pretty] [--cache-path CACHE_PATH] [--cache-max_age CACHE_MAX_AGE] [--force-cache] [--refresh-cache] [--env] [--api-token API_TOKEN] Produce an Ansible Inventory file based on DigitalOcean credentials optional arguments: -h, --help show this help message and exit --list List all active Droplets as Ansible inventory (default: True) --host HOST Get all Ansible inventory variables about a specific Droplet --all List all DigitalOcean information as JSON --droplets, -d List Droplets as JSON --regions List Regions as JSON --images List Images as JSON --sizes List Sizes as JSON --ssh-keys List SSH keys as JSON --domains List Domains as JSON --tags List Tags as JSON --pretty, -p Pretty-print results --cache-path CACHE_PATH Path to the cache files (default: .) --cache-max_age CACHE_MAX_AGE Maximum age of the cached items (default: 0) --force-cache Only use data from the cache --refresh-cache, -r Force refresh of cache by making API requests to DigitalOcean (default: False - use cache files) --env, -e Display DO_API_TOKEN --api-token API_TOKEN, -a API_TOKEN DigitalOcean API Token ``` """ # (c) 2013, Evan Wies # (c) 2017, Ansible Project # (c) 2017, Abhijeet Kasurde # # Inspired by the EC2 inventory plugin: # https://github.com/ansible/ansible/blob/devel/contrib/inventory/ec2.py # # This file is part of Ansible, # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
###################################################################### import argparse import ast import os import re import requests import sys from time import time try: import ConfigParser except ImportError: import configparser as ConfigParser try: import json except ImportError: import simplejson as json class DoManager: def __init__(self, api_token): self.api_token = api_token self.api_endpoint = 'https://api.digitalocean.com/v2' self.headers = {'Authorization': 'Bearer {0}'.format(self.api_token), 'Content-type': 'application/json'} self.timeout = 60 def _url_builder(self, path): if path[0] == '/': path = path[1:] return '%s/%s' % (self.api_endpoint, path) def send(self, url, method='GET', data=None): url = self._url_builder(url) data = json.dumps(data) try: if method == 'GET': resp_data = {} incomplete = True while incomplete: resp = requests.get(url, data=data, headers=self.headers, timeout=self.timeout) json_resp = resp.json() for key, value in json_resp.items(): if isinstance(value, list) and key in resp_data: resp_data[key] += value else: resp_data[key] = value try: url = json_resp['links']['pages']['next'] except KeyError: incomplete = False except ValueError as e: sys.exit("Unable to parse result from %s: %s" % (url, e)) return resp_data def all_active_droplets(self): resp = self.send('droplets/') return resp['droplets'] def all_regions(self): resp = self.send('regions/') return resp['regions'] def all_images(self, filter_name='global'): params = {'filter': filter_name} resp = self.send('images/', data=params) return resp['images'] def sizes(self): resp = self.send('sizes/') return resp['sizes'] def all_ssh_keys(self): resp = self.send('account/keys') return resp['ssh_keys'] def all_domains(self): resp = self.send('domains/') return resp['domains'] def show_droplet(self, droplet_id): resp = self.send('droplets/%s' % droplet_id) return resp['droplet'] def all_tags(self): resp = self.send('tags/') return resp['tags'] class DigitalOceanInventory(object): ########################################################################### # Main execution path ########################################################################### def __init__(self): """Main execution path """ # DigitalOceanInventory data self.data = {} # All DigitalOcean data self.inventory = {} # Ansible Inventory # Define defaults self.cache_path = '.' self.cache_max_age = 0 self.use_private_network = False self.group_variables = {} # Read settings, environment variables, and CLI arguments self.read_settings() self.read_environment() self.read_cli_args() # Verify credentials were set if not hasattr(self, 'api_token'): msg = 'Could not find values for DigitalOcean api_token. 
They must be specified via either ini file, ' \ 'command line argument (--api-token), or environment variables (DO_API_TOKEN)\n' sys.stderr.write(msg) sys.exit(-1) # env command, show DigitalOcean credentials if self.args.env: print("DO_API_TOKEN=%s" % self.api_token) sys.exit(0) # Manage cache self.cache_filename = self.cache_path + "/ansible-digital_ocean.cache" self.cache_refreshed = False if self.is_cache_valid(): self.load_from_cache() if len(self.data) == 0: if self.args.force_cache: sys.stderr.write('Cache is empty and --force-cache was specified\n') sys.exit(-1) self.manager = DoManager(self.api_token) # Pick the json_data to print based on the CLI command if self.args.droplets: self.load_from_digital_ocean('droplets') json_data = {'droplets': self.data['droplets']} elif self.args.regions: self.load_from_digital_ocean('regions') json_data = {'regions': self.data['regions']} elif self.args.images: self.load_from_digital_ocean('images') json_data = {'images': self.data['images']} elif self.args.sizes: self.load_from_digital_ocean('sizes') json_data = {'sizes': self.data['sizes']} elif self.args.ssh_keys: self.load_from_digital_ocean('ssh_keys') json_data = {'ssh_keys': self.data['ssh_keys']} elif self.args.domains: self.load_from_digital_ocean('domains') json_data = {'domains': self.data['domains']} elif self.args.tags: self.load_from_digital_ocean('tags') json_data = {'tags': self.data['tags']} elif self.args.all: self.load_from_digital_ocean() json_data = self.data elif self.args.host: json_data = self.load_droplet_variables_for_host() else: # '--list' this is last to make it default self.load_from_digital_ocean('droplets') self.build_inventory() json_data = self.inventory if self.cache_refreshed: self.write_to_cache() if self.args.pretty: print(json.dumps(json_data, sort_keys=True, indent=2)) else: print(json.dumps(json_data)) ########################################################################### # Script configuration ########################################################################### def read_settings(self): """ Reads the settings from the digital_ocean.ini file """ config = ConfigParser.SafeConfigParser() config_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'digital_ocean.ini') config.read(config_path) # Credentials if config.has_option('digital_ocean', 'api_token'): self.api_token = config.get('digital_ocean', 'api_token') # Cache related if config.has_option('digital_ocean', 'cache_path'): self.cache_path = config.get('digital_ocean', 'cache_path') if config.has_option('digital_ocean', 'cache_max_age'): self.cache_max_age = config.getint('digital_ocean', 'cache_max_age') # Private IP Address if config.has_option('digital_ocean', 'use_private_network'): self.use_private_network = config.getboolean('digital_ocean', 'use_private_network') # Group variables if config.has_option('digital_ocean', 'group_variables'): self.group_variables = ast.literal_eval(config.get('digital_ocean', 'group_variables')) def read_environment(self): """ Reads the settings from environment variables """ # Setup credentials if os.getenv("DO_API_TOKEN"): self.api_token = os.getenv("DO_API_TOKEN") if os.getenv("DO_API_KEY"): self.api_token = os.getenv("DO_API_KEY") def read_cli_args(self): """ Command line argument processing """ parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on DigitalOcean credentials') parser.add_argument('--list', action='store_true', help='List all active Droplets as Ansible inventory (default: True)') 
parser.add_argument('--host', action='store', help='Get all Ansible inventory variables about a specific Droplet') parser.add_argument('--all', action='store_true', help='List all DigitalOcean information as JSON') parser.add_argument('--droplets', '-d', action='store_true', help='List Droplets as JSON') parser.add_argument('--regions', action='store_true', help='List Regions as JSON') parser.add_argument('--images', action='store_true', help='List Images as JSON') parser.add_argument('--sizes', action='store_true', help='List Sizes as JSON') parser.add_argument('--ssh-keys', action='store_true', help='List SSH keys as JSON') parser.add_argument('--domains', action='store_true', help='List Domains as JSON') parser.add_argument('--tags', action='store_true', help='List Tags as JSON') parser.add_argument('--pretty', '-p', action='store_true', help='Pretty-print results') parser.add_argument('--cache-path', action='store', help='Path to the cache files (default: .)') parser.add_argument('--cache-max_age', action='store', help='Maximum age of the cached items (default: 0)') parser.add_argument('--force-cache', action='store_true', default=False, help='Only use data from the cache') parser.add_argument('--refresh-cache', '-r', action='store_true', default=False, help='Force refresh of cache by making API requests to DigitalOcean (default: False - use cache files)') parser.add_argument('--env', '-e', action='store_true', help='Display DO_API_TOKEN') parser.add_argument('--api-token', '-a', action='store', help='DigitalOcean API Token') self.args = parser.parse_args() if self.args.api_token: self.api_token = self.args.api_token # Make --list default if none of the other commands are specified if (not self.args.droplets and not self.args.regions and not self.args.images and not self.args.sizes and not self.args.ssh_keys and not self.args.domains and not self.args.tags and not self.args.all and not self.args.host): self.args.list = True ########################################################################### # Data Management ########################################################################### def load_from_digital_ocean(self, resource=None): """Get JSON from DigitalOcean API """ if self.args.force_cache and os.path.isfile(self.cache_filename): return # We always get fresh droplets if self.is_cache_valid() and not (resource == 'droplets' or resource is None): return if self.args.refresh_cache: resource = None if resource == 'droplets' or resource is None: self.data['droplets'] = self.manager.all_active_droplets() self.cache_refreshed = True if resource == 'regions' or resource is None: self.data['regions'] = self.manager.all_regions() self.cache_refreshed = True if resource == 'images' or resource is None: self.data['images'] = self.manager.all_images() self.cache_refreshed = True if resource == 'sizes' or resource is None: self.data['sizes'] = self.manager.sizes() self.cache_refreshed = True if resource == 'ssh_keys' or resource is None: self.data['ssh_keys'] = self.manager.all_ssh_keys() self.cache_refreshed = True if resource == 'domains' or resource is None: self.data['domains'] = self.manager.all_domains() self.cache_refreshed = True if resource == 'tags' or resource is None: self.data['tags'] = self.manager.all_tags() self.cache_refreshed = True def build_inventory(self): """ Build Ansible inventory of droplets """ self.inventory = { 'all': { 'hosts': [], 'vars': self.group_variables }, '_meta': {'hostvars': {}} } # add all droplets by id and name for droplet in 
self.data['droplets']: for net in droplet['networks']['v4']: if net['type'] == 'public': dest = net['ip_address'] else: continue self.inventory['all']['hosts'].append(dest) self.inventory[droplet['id']] = [dest] self.inventory[droplet['name']] = [dest] # groups that are always present for group in ('region_' + droplet['region']['slug'], 'image_' + str(droplet['image']['id']), 'size_' + droplet['size']['slug'], 'distro_' + DigitalOceanInventory.to_safe(droplet['image']['distribution']), 'status_' + droplet['status']): if group not in self.inventory: self.inventory[group] = {'hosts': [], 'vars': {}} self.inventory[group]['hosts'].append(dest) # groups that are not always present for group in (droplet['image']['slug'], droplet['image']['name']): if group: image = 'image_' + DigitalOceanInventory.to_safe(group) if image not in self.inventory: self.inventory[image] = {'hosts': [], 'vars': {}} self.inventory[image]['hosts'].append(dest) if droplet['tags']: for tag in droplet['tags']: if tag not in self.inventory: self.inventory[tag] = {'hosts': [], 'vars': {}} self.inventory[tag]['hosts'].append(dest) # hostvars info = self.do_namespace(droplet) self.inventory['_meta']['hostvars'][dest] = info def load_droplet_variables_for_host(self): """ Generate a JSON response to a --host call """ host = int(self.args.host) droplet = self.manager.show_droplet(host) info = self.do_namespace(droplet) return {'droplet': info} ########################################################################### # Cache Management ########################################################################### def is_cache_valid(self): """ Determines if the cache files have expired, or if it is still valid """ if os.path.isfile(self.cache_filename): mod_time = os.path.getmtime(self.cache_filename) current_time = time() if (mod_time + self.cache_max_age) > current_time: return True return False def load_from_cache(self): """ Reads the data from the cache file and assigns it to member variables as Python Objects """ try: with open(self.cache_filename, 'r') as cache: json_data = cache.read() data = json.loads(json_data) except IOError: data = {'data': {}, 'inventory': {}} self.data = data['data'] self.inventory = data['inventory'] def write_to_cache(self): """ Writes data in JSON format to a file """ data = {'data': self.data, 'inventory': self.inventory} json_data = json.dumps(data, sort_keys=True, indent=2) with open(self.cache_filename, 'w') as cache: cache.write(json_data) ########################################################################### # Utilities ########################################################################### @staticmethod def to_safe(word): """ Converts 'bad' characters in a string to underscores so they can be used as Ansible groups """ return re.sub(r"[^A-Za-z0-9\-.]", "_", word) @staticmethod def do_namespace(data): """ Returns a copy of the dictionary with all the keys put in a 'do_' namespace """ info = {} for k, v in data.items(): info['do_' + k] = v return info ########################################################################### # Run the script DigitalOceanInventory() ansible-2.5.1/contrib/inventory/docker.py0000755000000000000000000010260513265756155020436 0ustar rootroot00000000000000#!/usr/bin/env python # # (c) 2016 Paul Durivage # Chris Houseknecht # James Tanner # # This file is part of Ansible. 
# # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . # DOCUMENTATION = ''' Docker Inventory Script ======================= The inventory script generates dynamic inventory by making API requests to one or more Docker APIs. It's dynamic because the inventory is generated at run-time rather than being read from a static file. The script generates the inventory by connecting to one or many Docker APIs and inspecting the containers it finds at each API. Which APIs the script contacts can be defined using environment variables or a configuration file. Requirements ------------ Using the docker modules requires having docker-py installed on the host running Ansible. To install docker-py: pip install docker-py Run for Specific Host --------------------- When run for a specific container using the --host option this script returns the following hostvars: { "ansible_ssh_host": "", "ansible_ssh_port": 0, "docker_apparmorprofile": "", "docker_args": [], "docker_config": { "AttachStderr": false, "AttachStdin": false, "AttachStdout": false, "Cmd": [ "/hello" ], "Domainname": "", "Entrypoint": null, "Env": null, "Hostname": "9f2f80b0a702", "Image": "hello-world", "Labels": {}, "OnBuild": null, "OpenStdin": false, "StdinOnce": false, "Tty": false, "User": "", "Volumes": null, "WorkingDir": "" }, "docker_created": "2016-04-18T02:05:59.659599249Z", "docker_driver": "aufs", "docker_execdriver": "native-0.2", "docker_execids": null, "docker_graphdriver": { "Data": null, "Name": "aufs" }, "docker_hostconfig": { "Binds": null, "BlkioWeight": 0, "CapAdd": null, "CapDrop": null, "CgroupParent": "", "ConsoleSize": [ 0, 0 ], "ContainerIDFile": "", "CpuPeriod": 0, "CpuQuota": 0, "CpuShares": 0, "CpusetCpus": "", "CpusetMems": "", "Devices": null, "Dns": null, "DnsOptions": null, "DnsSearch": null, "ExtraHosts": null, "GroupAdd": null, "IpcMode": "", "KernelMemory": 0, "Links": null, "LogConfig": { "Config": {}, "Type": "json-file" }, "LxcConf": null, "Memory": 0, "MemoryReservation": 0, "MemorySwap": 0, "MemorySwappiness": null, "NetworkMode": "default", "OomKillDisable": false, "PidMode": "host", "PortBindings": null, "Privileged": false, "PublishAllPorts": false, "ReadonlyRootfs": false, "RestartPolicy": { "MaximumRetryCount": 0, "Name": "" }, "SecurityOpt": [ "label:disable" ], "UTSMode": "", "Ulimits": null, "VolumeDriver": "", "VolumesFrom": null }, "docker_hostnamepath": "/mnt/sda1/var/lib/docker/containers/9f2f80b0a702361d1ac432e6af816c19bda46da15c21264fb418c873de635a14/hostname", "docker_hostspath": "/mnt/sda1/var/lib/docker/containers/9f2f80b0a702361d1ac432e6af816c19bda46da15c21264fb418c873de635a14/hosts", "docker_id": "9f2f80b0a702361d1ac432e6af816c19bda46da15c21264fb418c873de635a14", "docker_image": "0a6ba66e537a53a5ea94f7c6a99c534c6adb12e3ed09326d4bf3b38f7c3ba4e7", "docker_logpath": "/mnt/sda1/var/lib/docker/containers/9f2f80b0a702361d1ac432e6af816c19bda46da15c21264fb418c873de635a14/9f2f80b0a702361d1ac432e6a-json.log", "docker_mountlabel": "", "docker_mounts": [], 
"docker_name": "/hello-world", "docker_networksettings": { "Bridge": "", "EndpointID": "", "Gateway": "", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "HairpinMode": false, "IPAddress": "", "IPPrefixLen": 0, "IPv6Gateway": "", "LinkLocalIPv6Address": "", "LinkLocalIPv6PrefixLen": 0, "MacAddress": "", "Networks": { "bridge": { "EndpointID": "", "Gateway": "", "GlobalIPv6Address": "", "GlobalIPv6PrefixLen": 0, "IPAddress": "", "IPPrefixLen": 0, "IPv6Gateway": "", "MacAddress": "" } }, "Ports": null, "SandboxID": "", "SandboxKey": "", "SecondaryIPAddresses": null, "SecondaryIPv6Addresses": null }, "docker_path": "/hello", "docker_processlabel": "", "docker_resolvconfpath": "/mnt/sda1/var/lib/docker/containers/9f2f80b0a702361d1ac432e6af816c19bda46da15c21264fb418c873de635a14/resolv.conf", "docker_restartcount": 0, "docker_short_id": "9f2f80b0a7023", "docker_state": { "Dead": false, "Error": "", "ExitCode": 0, "FinishedAt": "2016-04-18T02:06:00.296619369Z", "OOMKilled": false, "Paused": false, "Pid": 0, "Restarting": false, "Running": false, "StartedAt": "2016-04-18T02:06:00.272065041Z", "Status": "exited" } } Groups ------ When run in --list mode (the default), container instances are grouped by: - container id - container name - container short id - image_name (image_) - docker_host - running - stopped Configuration: -------------- You can control the behavior of the inventory script by passing arguments, defining environment variables, or creating a configuration file named docker.yml (sample provided in ansible/contrib/inventory). The order of precedence is command line args, then the docker.yml file and finally environment variables. Environment variables: ...................... To connect to a single Docker API the following variables can be defined in the environment to control the connection options. These are the same environment variables used by the Docker modules. DOCKER_HOST The URL or Unix socket path used to connect to the Docker API. Defaults to unix://var/run/docker.sock. DOCKER_API_VERSION: The version of the Docker API running on the Docker Host. Defaults to the latest version of the API supported by docker-py. DOCKER_TIMEOUT: The maximum amount of time in seconds to wait on a response fromm the API. Defaults to 60 seconds. DOCKER_TLS: Secure the connection to the API by using TLS without verifying the authenticity of the Docker host server. Defaults to False. DOCKER_TLS_VERIFY: Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server. Default is False DOCKER_TLS_HOSTNAME: When verifying the authenticity of the Docker Host server, provide the expected name of the server. Defaults to localhost. DOCKER_CERT_PATH: Path to the directory containing the client certificate, client key and CA certificate. DOCKER_SSL_VERSION: Provide a valid SSL version number. Default value determined by docker-py, which at the time of this writing was 1.0 In addition to the connection variables there are a couple variables used to control the execution and output of the script: DOCKER_CONFIG_FILE Path to the configuration file. Defaults to ./docker.yml. DOCKER_PRIVATE_SSH_PORT: The private port (container port) on which SSH is listening for connections. Defaults to 22. DOCKER_DEFAULT_IP: The IP address to assign to ansible_host when the container's SSH port is mapped to interface '0.0.0.0'. Configuration File .................. Using a configuration file provides a means for defining a set of Docker APIs from which to build an inventory. 
The default name of the file is derived from the name of the inventory script. By default the script will look for the basename of the script (i.e. docker) with an extension of '.yml'. You can also override the default name of the config file by defining DOCKER_CONFIG_FILE in the environment. Here's what you can define in docker.yml: defaults Defines a default connection. Defaults will be taken from this and applied to any values not provided for a host defined in the hosts list. hosts If you wish to get inventory from more than one Docker host, define a hosts list. For the default host and each host in the hosts list define the following attributes: host: description: The URL or Unix socket path used to connect to the Docker API. required: yes tls: description: Connect using TLS without verifying the authenticity of the Docker host server. default: false required: false tls_verify: description: Connect using TLS and verify the authenticity of the Docker host server. default: false required: false cert_path: description: Path to the client's TLS certificate file. default: null required: false cacert_path: description: Use a CA certificate when performing server verification by providing the path to a CA certificate file. default: null required: false key_path: description: Path to the client's TLS key file. default: null required: false version: description: The Docker API version. required: false default: will be supplied by the docker-py module. timeout: description: The amount of time in seconds to wait on an API response. required: false default: 60 default_ip: description: The IP address to assign to ansible_host when the container's SSH port is mapped to interface '0.0.0.0'. required: false default: 127.0.0.1 private_ssh_port: description: The port containers use for SSH. required: false default: 22 Examples -------- # Connect to the Docker API on localhost port 4243 and format the JSON output DOCKER_HOST=tcp://localhost:4243 ./docker.py --pretty # Any container's ssh port exposed on 0.0.0.0 will be mapped to # another IP address (where Ansible will attempt to connect via SSH) DOCKER_DEFAULT_IP=1.2.3.4 ./docker.py --pretty # Run as input to a playbook: ansible-playbook -i ~/projects/ansible/contrib/inventory/docker.py docker_inventory_test.yml # Simple playbook to invoke with the above example: - name: Test docker_inventory hosts: all connection: local gather_facts: no tasks: - debug: msg="Container - {{ inventory_hostname }}" ''' import os import sys import json import argparse import re import yaml from collections import defaultdict # Manipulation of the path is needed because the docker-py # module is imported by the name docker, and because this file # is also named docker for path in [os.getcwd(), '', os.path.dirname(os.path.abspath(__file__))]: try: del sys.path[sys.path.index(path)] except ValueError: pass HAS_DOCKER_PY = True HAS_DOCKER_ERROR = False try: from docker.errors import APIError, TLSParameterError from docker.tls import TLSConfig from docker.constants import DEFAULT_TIMEOUT_SECONDS, DEFAULT_DOCKER_API_VERSION except ImportError as exc: HAS_DOCKER_ERROR = str(exc) HAS_DOCKER_PY = False # Client has recently been split into DockerClient and APIClient try: from docker import Client except ImportError as exc: try: from docker import APIClient as Client except ImportError as exc: HAS_DOCKER_ERROR = str(exc) HAS_DOCKER_PY = False class Client: pass DEFAULT_DOCKER_HOST = 'unix://var/run/docker.sock' DEFAULT_TLS = False DEFAULT_TLS_VERIFY = False DEFAULT_IP = '127.0.0.1' 
DEFAULT_SSH_PORT = '22' BOOLEANS_TRUE = ['yes', 'on', '1', 'true', 1, True] BOOLEANS_FALSE = ['no', 'off', '0', 'false', 0, False] DOCKER_ENV_ARGS = dict( config_file='DOCKER_CONFIG_FILE', docker_host='DOCKER_HOST', api_version='DOCKER_API_VERSION', cert_path='DOCKER_CERT_PATH', ssl_version='DOCKER_SSL_VERSION', tls='DOCKER_TLS', tls_verify='DOCKER_TLS_VERIFY', timeout='DOCKER_TIMEOUT', private_ssh_port='DOCKER_DEFAULT_SSH_PORT', default_ip='DOCKER_DEFAULT_IP', ) def fail(msg): sys.stderr.write("%s\n" % msg) sys.exit(1) def log(msg, pretty_print=False): if pretty_print: print(json.dumps(msg, sort_keys=True, indent=2)) else: print(msg + u'\n') class AnsibleDockerClient(Client): def __init__(self, auth_params, debug): self.auth_params = auth_params self.debug = debug self._connect_params = self._get_connect_params() try: super(AnsibleDockerClient, self).__init__(**self._connect_params) except APIError as exc: self.fail("Docker API error: %s" % exc) except Exception as exc: self.fail("Error connecting: %s" % exc) def fail(self, msg): fail(msg) def log(self, msg, pretty_print=False): if self.debug: log(msg, pretty_print) def _get_tls_config(self, **kwargs): self.log("get_tls_config:") for key in kwargs: self.log(" %s: %s" % (key, kwargs[key])) try: tls_config = TLSConfig(**kwargs) return tls_config except TLSParameterError as exc: self.fail("TLS config error: %s" % exc) def _get_connect_params(self): auth = self.auth_params self.log("auth params:") for key in auth: self.log(" %s: %s" % (key, auth[key])) if auth['tls'] or auth['tls_verify']: auth['docker_host'] = auth['docker_host'].replace('tcp://', 'https://') if auth['tls'] and auth['cert_path'] and auth['key_path']: # TLS with certs and no host verification tls_config = self._get_tls_config(client_cert=(auth['cert_path'], auth['key_path']), verify=False, ssl_version=auth['ssl_version']) return dict(base_url=auth['docker_host'], tls=tls_config, version=auth['api_version'], timeout=auth['timeout']) if auth['tls']: # TLS with no certs and no host verification tls_config = self._get_tls_config(verify=False, ssl_version=auth['ssl_version']) return dict(base_url=auth['docker_host'], tls=tls_config, version=auth['api_version'], timeout=auth['timeout']) if auth['tls_verify'] and auth['cert_path'] and auth['key_path']: # TLS with certs and host verification if auth['cacert_path']: tls_config = self._get_tls_config(client_cert=(auth['cert_path'], auth['key_path']), ca_cert=auth['cacert_path'], verify=True, assert_hostname=auth['tls_hostname'], ssl_version=auth['ssl_version']) else: tls_config = self._get_tls_config(client_cert=(auth['cert_path'], auth['key_path']), verify=True, assert_hostname=auth['tls_hostname'], ssl_version=auth['ssl_version']) return dict(base_url=auth['docker_host'], tls=tls_config, version=auth['api_version'], timeout=auth['timeout']) if auth['tls_verify'] and auth['cacert_path']: # TLS with cacert only tls_config = self._get_tls_config(ca_cert=auth['cacert_path'], assert_hostname=auth['tls_hostname'], verify=True, ssl_version=auth['ssl_version']) return dict(base_url=auth['docker_host'], tls=tls_config, version=auth['api_version'], timeout=auth['timeout']) if auth['tls_verify']: # TLS with verify and no certs tls_config = self._get_tls_config(verify=True, assert_hostname=auth['tls_hostname'], ssl_version=auth['ssl_version']) return dict(base_url=auth['docker_host'], tls=tls_config, version=auth['api_version'], 
timeout=auth['timeout']) # No TLS return dict(base_url=auth['docker_host'], version=auth['api_version'], timeout=auth['timeout']) def _handle_ssl_error(self, error): match = re.match(r"hostname.*doesn\'t match (\'.*\')", str(error)) if match: msg = "You asked for verification that Docker host name matches %s. The actual hostname is %s. " \ "Most likely you need to set DOCKER_TLS_HOSTNAME or pass tls_hostname with a value of %s. " \ "You may also use TLS without verification by setting the tls parameter to true." \ % (self.auth_params['tls_hostname'], match.group(1), match.group(1)) self.fail(msg) self.fail("SSL Exception: %s" % (error)) class EnvArgs(object): def __init__(self): self.config_file = None self.docker_host = None self.api_version = None self.cert_path = None self.ssl_version = None self.tls = None self.tls_verify = None self.tls_hostname = None self.timeout = None self.default_ssh_port = None self.default_ip = None class DockerInventory(object): def __init__(self): self._args = self._parse_cli_args() self._env_args = self._parse_env_args() self.groups = defaultdict(list) self.hostvars = defaultdict(dict) def run(self): config_from_file = self._parse_config_file() if not config_from_file: config_from_file = dict() docker_hosts = self.get_hosts(config_from_file) for host in docker_hosts: client = AnsibleDockerClient(host, self._args.debug) self.get_inventory(client, host) if not self._args.host: self.groups['docker_hosts'] = [host.get('docker_host') for host in docker_hosts] self.groups['_meta'] = dict( hostvars=self.hostvars ) print(self._json_format_dict(self.groups, pretty_print=self._args.pretty)) else: print(self._json_format_dict(self.hostvars.get(self._args.host, dict()), pretty_print=self._args.pretty)) sys.exit(0) def get_inventory(self, client, host): ssh_port = host.get('default_ssh_port') default_ip = host.get('default_ip') hostname = host.get('docker_host') try: containers = client.containers(all=True) except Exception as exc: self.fail("Error fetching containers for host %s - %s" % (hostname, str(exc))) for container in containers: id = container.get('Id') short_id = id[:13] try: name = container.get('Names', list()).pop(0).lstrip('/') except IndexError: name = short_id if not self._args.host or (self._args.host and self._args.host in [name, id, short_id]): try: inspect = client.inspect_container(id) except Exception as exc: self.fail("Error inspecting container %s - %s" % (name, str(exc))) running = inspect.get('State', dict()).get('Running') # Add container to groups image_name = inspect.get('Config', dict()).get('Image') if image_name: self.groups["image_%s" % (image_name)].append(name) self.groups[id].append(name) self.groups[name].append(name) if short_id not in self.groups: self.groups[short_id].append(name) self.groups[hostname].append(name) if running is True: self.groups['running'].append(name) else: self.groups['stopped'].append(name) # Figure out ssh IP and Port try: # Look up the public-facing port NAT'ed to the ssh port. port = client.port(container, ssh_port)[0] except (IndexError, AttributeError, TypeError): port = dict() try: ip = default_ip if port['HostIp'] == '0.0.0.0' else port['HostIp'] except KeyError: ip = '' facts = dict( ansible_ssh_host=ip, ansible_ssh_port=port.get('HostPort', int()), docker_name=name, docker_short_id=short_id ) for key in inspect: fact_key = self._slugify(key) facts[fact_key] = inspect.get(key) self.hostvars[name].update(facts) def _slugify(self, value): return 'docker_%s' % (re.sub(r'[^\w-]', '_', value).lower().lstrip('_')) def get_hosts(self, config): ''' Determine the list of docker hosts we need to talk to. 
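(As implemented by the or-chains below, a per-host entry takes precedence, then the config file's defaults section, then CLI arguments, then environment variables, and finally the built-in defaults.) 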
:param config: dictionary read from config file. can be empty. :return: list of connection dictionaries ''' hosts = list() hosts_list = config.get('hosts') defaults = config.get('defaults', dict()) self.log('defaults:') self.log(defaults, pretty_print=True) def_host = defaults.get('host') def_tls = defaults.get('tls') def_tls_verify = defaults.get('tls_verify') def_tls_hostname = defaults.get('tls_hostname') def_ssl_version = defaults.get('ssl_version') def_cert_path = defaults.get('cert_path') def_cacert_path = defaults.get('cacert_path') def_key_path = defaults.get('key_path') def_version = defaults.get('version') def_timeout = defaults.get('timeout') def_ip = defaults.get('default_ip') def_ssh_port = defaults.get('private_ssh_port') if hosts_list: # use hosts from config file for host in hosts_list: docker_host = host.get('host') or def_host or self._args.docker_host or \ self._env_args.docker_host or DEFAULT_DOCKER_HOST api_version = host.get('version') or def_version or self._args.api_version or \ self._env_args.api_version or DEFAULT_DOCKER_API_VERSION tls_hostname = host.get('tls_hostname') or def_tls_hostname or self._args.tls_hostname or \ self._env_args.tls_hostname tls_verify = host.get('tls_verify') or def_tls_verify or self._args.tls_verify or \ self._env_args.tls_verify or DEFAULT_TLS_VERIFY tls = host.get('tls') or def_tls or self._args.tls or self._env_args.tls or DEFAULT_TLS ssl_version = host.get('ssl_version') or def_ssl_version or self._args.ssl_version or \ self._env_args.ssl_version cert_path = host.get('cert_path') or def_cert_path or self._args.cert_path or \ self._env_args.cert_path if cert_path and cert_path == self._env_args.cert_path: cert_path = os.path.join(cert_path, 'cert.pem') cacert_path = host.get('cacert_path') or def_cacert_path or self._args.cacert_path or \ self._env_args.cert_path if cacert_path and cacert_path == self._env_args.cert_path: cacert_path = os.path.join(cacert_path, 'ca.pem') key_path = host.get('key_path') or def_key_path or self._args.key_path or \ self._env_args.cert_path if key_path and key_path == self._env_args.cert_path: key_path = os.path.join(key_path, 'key.pem') timeout = host.get('timeout') or def_timeout or self._args.timeout or self._env_args.timeout or \ DEFAULT_TIMEOUT_SECONDS default_ip = host.get('default_ip') or def_ip or self._args.default_ip_address or \ DEFAULT_IP default_ssh_port = host.get('private_ssh_port') or def_ssh_port or self._args.private_ssh_port or \ DEFAULT_SSH_PORT host_dict = dict( docker_host=docker_host, api_version=api_version, tls=tls, tls_verify=tls_verify, tls_hostname=tls_hostname, cert_path=cert_path, cacert_path=cacert_path, key_path=key_path, ssl_version=ssl_version, timeout=timeout, default_ip=default_ip, default_ssh_port=default_ssh_port, ) hosts.append(host_dict) else: # use default definition docker_host = def_host or self._args.docker_host or self._env_args.docker_host or DEFAULT_DOCKER_HOST api_version = def_version or self._args.api_version or self._env_args.api_version or \ DEFAULT_DOCKER_API_VERSION tls_hostname = def_tls_hostname or self._args.tls_hostname or self._env_args.tls_hostname tls_verify = def_tls_verify or self._args.tls_verify or self._env_args.tls_verify or DEFAULT_TLS_VERIFY tls = def_tls or self._args.tls or self._env_args.tls or DEFAULT_TLS ssl_version = def_ssl_version or self._args.ssl_version or self._env_args.ssl_version cert_path = def_cert_path or self._args.cert_path or self._env_args.cert_path if cert_path and cert_path == self._env_args.cert_path: cert_path 
= os.path.join(cert_path, 'cert.pem') cacert_path = def_cacert_path or self._args.cacert_path or self._env_args.cert_path if cacert_path and cacert_path == self._env_args.cert_path: cacert_path = os.path.join(cacert_path, 'ca.pem') key_path = def_key_path or self._args.key_path or self._env_args.cert_path if key_path and key_path == self._env_args.cert_path: key_path = os.path.join(key_path, 'key.pem') timeout = def_timeout or self._args.timeout or self._env_args.timeout or DEFAULT_TIMEOUT_SECONDS default_ip = def_ip or self._args.default_ip_address or DEFAULT_IP default_ssh_port = def_ssh_port or self._args.private_ssh_port or DEFAULT_SSH_PORT host_dict = dict( docker_host=docker_host, api_version=api_version, tls=tls, tls_verify=tls_verify, tls_hostname=tls_hostname, cert_path=cert_path, cacert_path=cacert_path, key_path=key_path, ssl_version=ssl_version, timeout=timeout, default_ip=default_ip, default_ssh_port=default_ssh_port, ) hosts.append(host_dict) self.log("hosts: ") self.log(hosts, pretty_print=True) return hosts def _parse_config_file(self): config = dict() config_path = None if self._args.config_file: config_path = self._args.config_file elif self._env_args.config_file: config_path = self._env_args.config_file if config_path: try: config_file = os.path.abspath(config_path) # default config path is docker.yml in same directory as this script # old behaviour is docker.yml in current directory. Handle both. if not os.path.exists(config_file): config_file = os.path.abspath(os.path.basename(config_path)) except Exception: config_file = None if config_file and os.path.exists(config_file): with open(config_file) as f: try: config = yaml.safe_load(f.read()) except Exception as exc: self.fail("Error: parsing %s - %s" % (config_path, str(exc))) return config def log(self, msg, pretty_print=False): if self._args.debug: log(msg, pretty_print) def fail(self, msg): fail(msg) def _parse_env_args(self): args = EnvArgs() for key, value in DOCKER_ENV_ARGS.items(): if os.environ.get(value): val = os.environ.get(value) if val in BOOLEANS_TRUE: val = True if val in BOOLEANS_FALSE: val = False setattr(args, key, val) return args def _parse_cli_args(self): # Parse command line arguments basename = os.path.splitext(os.path.basename(__file__))[0] default_config = os.path.join(os.path.dirname(__file__), basename + '.yml') parser = argparse.ArgumentParser( description='Return Ansible inventory for one or more Docker hosts.') parser.add_argument('--list', action='store_true', default=True, help='List all containers (default: True)') parser.add_argument('--debug', action='store_true', default=False, help='Send debug messages to STDOUT') parser.add_argument('--host', action='store', help='Only get information for a specific container.') parser.add_argument('--pretty', action='store_true', default=False, help='Pretty print JSON output (default: False)') parser.add_argument('--config-file', action='store', default=default_config, help="Name of the config file to use. Default is %s" % (default_config)) parser.add_argument('--docker-host', action='store', default=None, help="The base URL or Unix socket path to connect to the docker daemon. Defaults to %s" % (DEFAULT_DOCKER_HOST)) parser.add_argument('--tls-hostname', action='store', default='localhost', help="Host name to expect in TLS certs. Defaults to 'localhost'") parser.add_argument('--api-version', action='store', default=None, help="Docker daemon API version. 
Defaults to %s" % (DEFAULT_DOCKER_API_VERSION)) parser.add_argument('--timeout', action='store', default=None, help="Docker connection timeout in seconds. Defaults to %s" % (DEFAULT_TIMEOUT_SECONDS)) parser.add_argument('--cacert-path', action='store', default=None, help="Path to the TLS certificate authority pem file.") parser.add_argument('--cert-path', action='store', default=None, help="Path to the TLS certificate pem file.") parser.add_argument('--key-path', action='store', default=None, help="Path to the TLS encryption key pem file.") parser.add_argument('--ssl-version', action='store', default=None, help="TLS version number") parser.add_argument('--tls', action='store_true', default=None, help="Use TLS. Defaults to %s" % (DEFAULT_TLS)) parser.add_argument('--tls-verify', action='store_true', default=None, help="Verify TLS certificates. Defaults to %s" % (DEFAULT_TLS_VERIFY)) parser.add_argument('--private-ssh-port', action='store', default=None, help="Default private container SSH Port. Defaults to %s" % (DEFAULT_SSH_PORT)) parser.add_argument('--default-ip-address', action='store', default=None, help="Default container SSH IP address. Defaults to %s" % (DEFAULT_IP)) return parser.parse_args() def _json_format_dict(self, data, pretty_print=False): # format inventory data for output if pretty_print: return json.dumps(data, sort_keys=True, indent=4) else: return json.dumps(data) def main(): if not HAS_DOCKER_PY: fail("Failed to import docker-py. Try `pip install docker-py` - %s" % (HAS_DOCKER_ERROR)) DockerInventory().run() main() ansible-2.5.1/contrib/inventory/docker.yml0000644000000000000000000000417613265756155020610 0ustar rootroot00000000000000# This is the configuration file for the Docker inventory script: docker_inventory.py. # # You can define the following in this file: # # defaults # Defines a default connection. Defaults will be taken from this and applied to any values not provided # for a host defined in the hosts list. # # hosts # If you wish to get inventory from more than one Docker host, define a hosts list. # # For the default host and each host in the hosts list define the following attributes: # # host: # description: The URL or Unix socket path used to connect to the Docker API. # required: yes # # tls: # description: Connect using TLS without verifying the authenticity of the Docker host server. # default: false # required: false # # tls_verify: # description: Connect using TLS without verifying the authenticity of the Docker host server. # default: false # required: false # # cert_path: # description: Path to the client's TLS certificate file. # default: null # required: false # # cacert_path: # description: Use a CA certificate when performing server verification by providing the path to a CA certificate file. # default: null # required: false # # key_path: # description: Path to the client's TLS key file. # default: null # required: false # # version: # description: The Docker API version. # required: false # default: will be supplied by the docker-py module. # # timeout: # description: The amount of time in seconds to wait on an API response. # required: false # default: 60 # # default_ip: # description: The IP address to assign to ansible_host when the container's SSH port is mapped to interface # '0.0.0.0'. 
# required: false # default: 127.0.0.1 # # private_ssh_port: # description: The port containers use for SSH # required: false # default: 22 #defaults: # host: unix:///var/run/docker.sock # private_ssh_port: 22 # default_ip: 127.0.0.1 #hosts: # - host: tcp://10.45.5.16:4243 # private_ssh_port: 2022 # default_ip: 172.16.3.45 # - host: tcp://localhost:4243 # private_ssh_port: 2029 ansible-2.5.1/contrib/inventory/ec2.ini0000644000000000000000000002247113265756155017766 0ustar rootroot00000000000000# Ansible EC2 external inventory script settings # [ec2] # to talk to a private eucalyptus instance uncomment these lines # and edit eucalyptus_host to be the host name of your cloud controller #eucalyptus = True #eucalyptus_host = clc.cloud.domain.org # AWS regions to make calls to. Set this to 'all' to make requests to all regions # in AWS and merge the results together. Alternatively, set this to a comma # separated list of regions. E.g. 'us-east-1,us-west-1,us-west-2' and do not # provide the 'regions_exclude' option. If this is set to 'auto', the AWS_REGION or # AWS_DEFAULT_REGION environment variable will be read to determine the region. regions = all regions_exclude = us-gov-west-1, cn-north-1 # When generating inventory, Ansible needs to know how to address a server. # Each EC2 instance has a lot of variables associated with it. Here is the list: # http://docs.pythonboto.org/en/latest/ref/ec2.html#module-boto.ec2.instance # Below are 2 variables that are used as the address of a server: # - destination_variable # - vpc_destination_variable # This is the normal destination variable to use. If you are running Ansible # from outside EC2, then 'public_dns_name' makes the most sense. If you are # running Ansible from within EC2, then perhaps you want to use the internal # address, and should set this to 'private_dns_name'. The key of an EC2 tag # may optionally be used; however the boto instance variables hold precedence # in the event of a collision. destination_variable = public_dns_name # This allows you to override the inventory_name with an ec2 variable, instead # of using the destination_variable above. Addressing (aka ansible_ssh_host) # will still use destination_variable. Tags should be written as 'tag_TAGNAME'. #hostname_variable = tag_Name # For servers inside a VPC, using DNS names may not make sense. When an instance # has 'subnet_id' set, this variable is used. If the subnet is public, setting # this to 'ip_address' will return the public IP address. For instances in a # private subnet, this should be set to 'private_ip_address', and Ansible must # be run from within EC2. The key of an EC2 tag may optionally be used; however # the boto instance variables hold precedence in the event of a collision. # WARNING: instances that are in a private VPC _without_ a public ip address # will not be listed in the inventory until you set: # vpc_destination_variable = private_ip_address vpc_destination_variable = ip_address # The following two settings allow flexible ansible host naming based on a # python format string and a comma-separated list of ec2 tags. Note that: # # 1) If the tags referenced are not present for some instances, empty strings # will be substituted in the format string. # 2) This overrides both destination_variable and vpc_destination_variable. # #destination_format = {0}.{1}.example.com #destination_format_tags = Name,environment # To tag instances on EC2 with the resource records that point to them from # Route53, set 'route53' to True. 
route53 = False # To use Route53 records as the inventory hostnames, uncomment and set # to equal the domain name you wish to use. You must also have 'route53' (above) # set to True. # route53_hostnames = .example.com # To exclude RDS instances from the inventory, uncomment and set to False. #rds = False # To exclude ElastiCache instances from the inventory, uncomment and set to False. #elasticache = False # Additionally, you can specify a list of zones to exclude from lookups in # 'route53_excluded_zones' as a comma-separated list. # route53_excluded_zones = samplezone1.com, samplezone2.com # By default, only EC2 instances in the 'running' state are returned. Set # 'all_instances' to True to return all instances regardless of state. all_instances = False # By default, only EC2 instances in the 'running' state are returned. Specify # EC2 instance states to return as a comma-separated list. This # option is overridden when 'all_instances' is True. # instance_states = pending, running, shutting-down, terminated, stopping, stopped # By default, only RDS instances in the 'available' state are returned. Set # 'all_rds_instances' to True to return all RDS instances regardless of state. all_rds_instances = False # Include RDS cluster information (Aurora etc.) include_rds_clusters = False # By default, only ElastiCache clusters and nodes in the 'available' state # are returned. Set 'all_elasticache_clusters' and/or 'all_elasticache_nodes' # to True to return all ElastiCache clusters and nodes, regardless of state. # # Note that all_elasticache_nodes only applies to listed clusters. That means # if you set all_elasticache_clusters to false, no nodes will be returned from # unavailable clusters, regardless of their state and of what you set for # all_elasticache_nodes. all_elasticache_replication_groups = False all_elasticache_clusters = False all_elasticache_nodes = False # API calls to EC2 are slow. For this reason, we cache the results of an API # call. Set this to the path you want cache files to be written to. Two files # will be written to this directory: # - ansible-ec2.cache # - ansible-ec2.index cache_path = ~/.ansible/tmp # The number of seconds a cache file is considered valid. After this many # seconds, a new API call will be made, and the cache file will be updated. # To disable the cache, set this value to 0 cache_max_age = 300 # Organize groups into a nested hierarchy instead of a flat namespace. nested_groups = False # Replace dashes ('-') in tags when creating groups, to avoid issues with ansible replace_dash_in_groups = True # If set to true, any tag of the form "a,b,c" is expanded into a list # and the results are used to create additional tag_* inventory groups. expand_csv_tags = False # The EC2 inventory output can become very large. To manage its size, # configure which groups should be created. 
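# Each 'group_by_*' flag below toggles one family of groups; set a flag to
# False to omit that family from the inventory output.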
group_by_instance_id = True group_by_region = True group_by_availability_zone = True group_by_aws_account = False group_by_ami_id = True group_by_instance_type = True group_by_instance_state = False group_by_platform = True group_by_key_pair = True group_by_vpc_id = True group_by_security_group = True group_by_tag_keys = True group_by_tag_none = True group_by_route53_names = True group_by_rds_engine = True group_by_rds_parameter_group = True group_by_elasticache_engine = True group_by_elasticache_cluster = True group_by_elasticache_parameter_group = True group_by_elasticache_replication_group = True # If you only want to include hosts that match a certain regular expression # pattern_include = staging-* # If you want to exclude any hosts that match a certain regular expression # pattern_exclude = staging-* # Instance filters can be used to control which instances are retrieved for # inventory. For the full list of possible filters, please read the EC2 API # docs: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeInstances.html#query-DescribeInstances-filters # Filters are key/value pairs separated by '='; to list multiple filters, use # a list separated by commas. To "AND" criteria together, use "&". Note that # the "AND" is not useful along with stack_filters and so such usage is not allowed. # See examples below. # If you want to apply multiple filters simultaneously, set stack_filters to # True. Default behaviour is to combine the results of all filters. Stacking # allows the use of multiple conditions to filter down, for example by # environment and type of host. stack_filters = False # Retrieve only instances with (key=value) env=staging tag # instance_filters = tag:env=staging # Retrieve only instances with role=webservers OR role=dbservers tag # instance_filters = tag:role=webservers,tag:role=dbservers # Retrieve only t1.micro instances OR instances with tag env=staging # instance_filters = instance-type=t1.micro,tag:env=staging # You can use wildcards in filter values also. The example below will list instances whose # tag Name value matches webservers1* # (ex. webservers15, webservers1a, webservers123 etc) # instance_filters = tag:Name=webservers1* # Retrieve only instances of type t1.micro that also have tag env=stage # instance_filters = instance-type=t1.micro&tag:env=stage # Retrieve instances of type t1.micro AND tag env=stage, as well as any instances # that are of type m3.large, regardless of env tag # instance_filters = instance-type=t1.micro&tag:env=stage,instance-type=m3.large # An IAM role can be assumed, so all requests are run as that role. # This can be useful for connecting across different accounts, or to limit user # access # iam_role = role-arn # A boto configuration profile may be used to separate out credentials # see http://boto.readthedocs.org/en/latest/boto_config_tut.html # boto_profile = some-boto-profile-name [credentials] # The AWS credentials can optionally be specified here. Credentials specified # here are ignored if the environment variable AWS_ACCESS_KEY_ID or # AWS_PROFILE is set, or if the boto_profile property above is set. # # Supplying AWS credentials here is not recommended, as it introduces # non-trivial security concerns. When going down this route, please make sure # to set access permissions for this file correctly, e.g. handle it the same # way as you would a private SSH key. # # Unlike the boto and AWS configure files, this section does not support # profiles. 
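# For example, on a POSIX system you might restrict the file to its owner:
#
#     chmod 600 ec2.ini
#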
#
# aws_access_key_id = AXXXXXXXXXXXXXX
# aws_secret_access_key = XXXXXXXXXXXXXXXXXXX
# aws_security_token = XXXXXXXXXXXXXXXXXXXXXXXXXXXX
ansible-2.5.1/contrib/inventory/ec2.py0000755000000000000000000021470313265756155017633 0ustar rootroot00000000000000
#!/usr/bin/env python

'''
EC2 external inventory script
=================================

Generates inventory that Ansible can understand by making API requests to
AWS EC2 using the Boto library.

NOTE: This script assumes Ansible is being executed where the environment
variables needed for Boto have already been set:
    export AWS_ACCESS_KEY_ID='AK123'
    export AWS_SECRET_ACCESS_KEY='abc123'

The optional AWS_REGION (or AWS_DEFAULT_REGION) environment variable is used
when 'regions' is set to 'auto' in ec2.ini.

This script also assumes that there is an ec2.ini file alongside it. To
specify a different path to ec2.ini, define the EC2_INI_PATH environment
variable:
    export EC2_INI_PATH=/path/to/my_ec2.ini

If you're using eucalyptus you need to set the above variables and
you need to define:
    export EC2_URL=http://hostname_of_your_cc:port/services/Eucalyptus

If you're using boto profiles (requires boto>=2.24.0) you can choose a profile
using the --boto-profile command line argument (e.g. ec2.py --boto-profile prod)
or using the AWS_PROFILE variable:
    AWS_PROFILE=prod ansible-playbook -i ec2.py myplaybook.yml

For more details, see: http://docs.pythonboto.org/en/latest/boto_config_tut.html

When run against a specific host, this script returns the following variables:
 - ec2_ami_launch_index
 - ec2_architecture
 - ec2_association
 - ec2_attachTime
 - ec2_attachment
 - ec2_attachmentId
 - ec2_block_devices
 - ec2_client_token
 - ec2_deleteOnTermination
 - ec2_description
 - ec2_deviceIndex
 - ec2_dns_name
 - ec2_eventsSet
 - ec2_group_name
 - ec2_hypervisor
 - ec2_id
 - ec2_image_id
 - ec2_instanceState
 - ec2_instance_type
 - ec2_ipOwnerId
 - ec2_ip_address
 - ec2_item
 - ec2_kernel
 - ec2_key_name
 - ec2_launch_time
 - ec2_monitored
 - ec2_monitoring
 - ec2_networkInterfaceId
 - ec2_ownerId
 - ec2_persistent
 - ec2_placement
 - ec2_platform
 - ec2_previous_state
 - ec2_private_dns_name
 - ec2_private_ip_address
 - ec2_publicIp
 - ec2_public_dns_name
 - ec2_ramdisk
 - ec2_reason
 - ec2_region
 - ec2_requester_id
 - ec2_root_device_name
 - ec2_root_device_type
 - ec2_security_group_ids
 - ec2_security_group_names
 - ec2_shutdown_state
 - ec2_sourceDestCheck
 - ec2_spot_instance_request_id
 - ec2_state
 - ec2_state_code
 - ec2_state_reason
 - ec2_status
 - ec2_subnet_id
 - ec2_tenancy
 - ec2_virtualization_type
 - ec2_vpc_id

These variables are pulled out of a boto.ec2.instance object. There is a lack
of consistency with variable spellings (camelCase and underscores) since this
just loops through all variables the object exposes. It is preferred to use
the ones with underscores when multiple exist.

In addition, if an instance has AWS tags associated with it, each tag is a new
variable named:
 - ec2_tag_[Key] = [Value]

Security groups are comma-separated in 'ec2_security_group_ids' and
'ec2_security_group_names'.

When destination_format and destination_format_tags are specified
the destination_format can be built from the instance tags and attributes.
The behavior will first check the user defined tags, then proceed to
check instance attributes, and finally if neither are found 'nil' will
be used instead.

'my_instance': {
    'region': 'us-east-1',             # attribute
    'availability_zone': 'us-east-1a', # attribute
    'private_dns_name': '172.31.0.1',  # attribute
    'ec2_tag_deployment': 'blue',      # tag
    'ec2_tag_clusterid': 'ansible',    # tag
    'ec2_tag_Name': 'webserver',       # tag
    ...
}

Inside of the ec2.ini file the following settings are specified:

...
destination_format: {0}-{1}-{2}-{3}
destination_format_tags: Name,clusterid,deployment,private_dns_name
...

These settings would produce the following destination_format:
'webserver-ansible-blue-172.31.0.1'

'''

# (c) 2012, Peter Sankauskas
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

######################################################################

import sys
import os
import argparse
import re
from time import time

import boto
from boto import ec2
from boto import rds
from boto import elasticache
from boto import route53
from boto import sts

import six
from ansible.module_utils import ec2 as ec2_utils

HAS_BOTO3 = False
try:
    import boto3  # noqa
    HAS_BOTO3 = True
except ImportError:
    pass

from six.moves import configparser
from collections import defaultdict

try:
    import json
except ImportError:
    import simplejson as json

DEFAULTS = {
    'all_elasticache_clusters': 'False',
    'all_elasticache_nodes': 'False',
    'all_elasticache_replication_groups': 'False',
    'all_instances': 'False',
    'all_rds_instances': 'False',
    'aws_access_key_id': None,
    'aws_secret_access_key': None,
    'aws_security_token': None,
    'boto_profile': None,
    'cache_max_age': '300',
    'cache_path': '~/.ansible/tmp',
    'destination_variable': 'public_dns_name',
    'elasticache': 'True',
    'eucalyptus': 'False',
    'eucalyptus_host': None,
    'expand_csv_tags': 'False',
    'group_by_ami_id': 'True',
    'group_by_availability_zone': 'True',
    'group_by_aws_account': 'False',
    'group_by_elasticache_cluster': 'True',
    'group_by_elasticache_engine': 'True',
    'group_by_elasticache_parameter_group': 'True',
    'group_by_elasticache_replication_group': 'True',
    'group_by_instance_id': 'True',
    'group_by_instance_state': 'False',
    'group_by_instance_type': 'True',
    'group_by_key_pair': 'True',
    'group_by_platform': 'True',
    'group_by_rds_engine': 'True',
    'group_by_rds_parameter_group': 'True',
    'group_by_region': 'True',
    'group_by_route53_names': 'True',
    'group_by_security_group': 'True',
    'group_by_tag_keys': 'True',
    'group_by_tag_none': 'True',
    'group_by_vpc_id': 'True',
    'hostname_variable': None,
    'iam_role': None,
    'include_rds_clusters': 'False',
    'nested_groups': 'False',
    'pattern_exclude': None,
    'pattern_include': None,
    'rds': 'False',
    'regions': 'all',
    'regions_exclude': 'us-gov-west-1, cn-north-1',
    'replace_dash_in_groups': 'True',
    'route53': 'False',
    'route53_excluded_zones': '',
    'route53_hostnames': None,
    'stack_filters': 'False',
    'vpc_destination_variable': 'ip_address'
}


class Ec2Inventory(object):

    def _empty_inventory(self):
        return {"_meta": {"hostvars": {}}}

    def __init__(self):
        ''' Main execution path '''

        # Inventory grouped by instance IDs, tags, security groups, regions,
        # and availability zones
        self.inventory = self._empty_inventory()

        self.aws_account_id = None

        # Index of hostname (address) to instance ID
        self.index = {}

        # Boto profile to use (if any)
        self.boto_profile = None

        # AWS credentials.
self.credentials = {} # Read settings and parse CLI arguments self.parse_cli_args() self.read_settings() # Make sure that profile_name is not passed at all if not set # as pre 2.24 boto will fall over otherwise if self.boto_profile: if not hasattr(boto.ec2.EC2Connection, 'profile_name'): self.fail_with_error("boto version must be >= 2.24 to use profile") # Cache if self.args.refresh_cache: self.do_api_calls_update_cache() elif not self.is_cache_valid(): self.do_api_calls_update_cache() # Data to print if self.args.host: data_to_print = self.get_host_info() elif self.args.list: # Display list of instances for inventory if self.inventory == self._empty_inventory(): data_to_print = self.get_inventory_from_cache() else: data_to_print = self.json_format_dict(self.inventory, True) print(data_to_print) def is_cache_valid(self): ''' Determines if the cache files have expired, or if it is still valid ''' if os.path.isfile(self.cache_path_cache): mod_time = os.path.getmtime(self.cache_path_cache) current_time = time() if (mod_time + self.cache_max_age) > current_time: if os.path.isfile(self.cache_path_index): return True return False def read_settings(self): ''' Reads the settings from the ec2.ini file ''' scriptbasename = __file__ scriptbasename = os.path.basename(scriptbasename) scriptbasename = scriptbasename.replace('.py', '') defaults = { 'ec2': { 'ini_fallback': os.path.join(os.path.dirname(__file__), 'ec2.ini'), 'ini_path': os.path.join(os.path.dirname(__file__), '%s.ini' % scriptbasename) } } if six.PY3: config = configparser.ConfigParser(DEFAULTS) else: config = configparser.SafeConfigParser(DEFAULTS) ec2_ini_path = os.environ.get('EC2_INI_PATH', defaults['ec2']['ini_path']) ec2_ini_path = os.path.expanduser(os.path.expandvars(ec2_ini_path)) if not os.path.isfile(ec2_ini_path): ec2_ini_path = os.path.expanduser(defaults['ec2']['ini_fallback']) if os.path.isfile(ec2_ini_path): config.read(ec2_ini_path) # Add empty sections if they don't exist try: config.add_section('ec2') except configparser.DuplicateSectionError: pass try: config.add_section('credentials') except configparser.DuplicateSectionError: pass # is eucalyptus? 
        self.eucalyptus = config.getboolean('ec2', 'eucalyptus')
        self.eucalyptus_host = config.get('ec2', 'eucalyptus_host')

        # Regions
        self.regions = []
        configRegions = config.get('ec2', 'regions')
        if (configRegions == 'all'):
            if self.eucalyptus_host:
                # Pass the credentials to connect_euca; passing them as extra
                # arguments to list.append, as earlier revisions did, raises a
                # TypeError whenever credentials are actually set.
                self.regions.append(boto.connect_euca(host=self.eucalyptus_host, **self.credentials).region.name)
            else:
                configRegions_exclude = config.get('ec2', 'regions_exclude')
                for regionInfo in ec2.regions():
                    if regionInfo.name not in configRegions_exclude:
                        self.regions.append(regionInfo.name)
        else:
            self.regions = configRegions.split(",")
        if 'auto' in self.regions:
            env_region = os.environ.get('AWS_REGION')
            if env_region is None:
                env_region = os.environ.get('AWS_DEFAULT_REGION')
            self.regions = [env_region]

        # Destination addresses
        self.destination_variable = config.get('ec2', 'destination_variable')
        self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable')
        self.hostname_variable = config.get('ec2', 'hostname_variable')

        if config.has_option('ec2', 'destination_format') and \
                config.has_option('ec2', 'destination_format_tags'):
            self.destination_format = config.get('ec2', 'destination_format')
            self.destination_format_tags = config.get('ec2', 'destination_format_tags').split(',')
        else:
            self.destination_format = None
            self.destination_format_tags = None

        # Route53
        self.route53_enabled = config.getboolean('ec2', 'route53')
        self.route53_hostnames = config.get('ec2', 'route53_hostnames')

        self.route53_excluded_zones = []
        self.route53_excluded_zones = [a for a in config.get('ec2', 'route53_excluded_zones').split(',') if a]

        # Include RDS instances?
        self.rds_enabled = config.getboolean('ec2', 'rds')

        # Include RDS cluster instances?
        self.include_rds_clusters = config.getboolean('ec2', 'include_rds_clusters')

        # Include ElastiCache instances?
        self.elasticache_enabled = config.getboolean('ec2', 'elasticache')

        # Return all EC2 instances?
        self.all_instances = config.getboolean('ec2', 'all_instances')

        # Instance states to be gathered in inventory. Default is 'running'.
        # Setting 'all_instances' to 'yes' overrides this option.
        ec2_valid_instance_states = [
            'pending',
            'running',
            'shutting-down',
            'terminated',
            'stopping',
            'stopped'
        ]
        self.ec2_instance_states = []
        if self.all_instances:
            self.ec2_instance_states = ec2_valid_instance_states
        elif config.has_option('ec2', 'instance_states'):
            for instance_state in config.get('ec2', 'instance_states').split(','):
                instance_state = instance_state.strip()
                if instance_state not in ec2_valid_instance_states:
                    continue
                self.ec2_instance_states.append(instance_state)
        else:
            self.ec2_instance_states = ['running']

        # Return all RDS instances? (if RDS is enabled)
        self.all_rds_instances = config.getboolean('ec2', 'all_rds_instances')

        # Return all ElastiCache replication groups? (if ElastiCache is enabled)
        self.all_elasticache_replication_groups = config.getboolean('ec2', 'all_elasticache_replication_groups')

        # Return all ElastiCache clusters? (if ElastiCache is enabled)
        self.all_elasticache_clusters = config.getboolean('ec2', 'all_elasticache_clusters')

        # Return all ElastiCache nodes?
        # (if ElastiCache is enabled)
        self.all_elasticache_nodes = config.getboolean('ec2', 'all_elasticache_nodes')

        # boto configuration profile (prefer CLI argument then environment variables then config file)
        self.boto_profile = self.args.boto_profile or \
            os.environ.get('AWS_PROFILE') or \
            config.get('ec2', 'boto_profile')

        # AWS credentials (prefer environment variables)
        if not (self.boto_profile or os.environ.get('AWS_ACCESS_KEY_ID') or
                os.environ.get('AWS_PROFILE')):

            aws_access_key_id = config.get('credentials', 'aws_access_key_id')
            aws_secret_access_key = config.get('credentials', 'aws_secret_access_key')
            aws_security_token = config.get('credentials', 'aws_security_token')

            if aws_access_key_id:
                self.credentials = {
                    'aws_access_key_id': aws_access_key_id,
                    'aws_secret_access_key': aws_secret_access_key
                }
                if aws_security_token:
                    self.credentials['security_token'] = aws_security_token

        # Cache related
        cache_dir = os.path.expanduser(config.get('ec2', 'cache_path'))
        if self.boto_profile:
            cache_dir = os.path.join(cache_dir, 'profile_' + self.boto_profile)
        if not os.path.exists(cache_dir):
            os.makedirs(cache_dir)

        cache_name = 'ansible-ec2'
        cache_id = self.boto_profile or os.environ.get('AWS_ACCESS_KEY_ID', self.credentials.get('aws_access_key_id'))
        if cache_id:
            cache_name = '%s-%s' % (cache_name, cache_id)
        cache_name += '-' + str(abs(hash(__file__)))[1:7]
        self.cache_path_cache = os.path.join(cache_dir, "%s.cache" % cache_name)
        self.cache_path_index = os.path.join(cache_dir, "%s.index" % cache_name)
        self.cache_max_age = config.getint('ec2', 'cache_max_age')

        self.expand_csv_tags = config.getboolean('ec2', 'expand_csv_tags')

        # Configure nested groups instead of flat namespace.
        self.nested_groups = config.getboolean('ec2', 'nested_groups')

        # Replace dash or not in group names
        self.replace_dash_in_groups = config.getboolean('ec2', 'replace_dash_in_groups')

        # IAM role to assume for connection
        self.iam_role = config.get('ec2', 'iam_role')

        # Configure which groups should be created.
        group_by_options = [a for a in DEFAULTS if a.startswith('group_by')]
        for option in group_by_options:
            setattr(self, option, config.getboolean('ec2', option))

        # Do we need to just include hosts that match a pattern?
        self.pattern_include = config.get('ec2', 'pattern_include')
        if self.pattern_include:
            self.pattern_include = re.compile(self.pattern_include)

        # Do we need to exclude hosts that match a pattern?
        self.pattern_exclude = config.get('ec2', 'pattern_exclude')
        if self.pattern_exclude:
            self.pattern_exclude = re.compile(self.pattern_exclude)

        # Do we want to stack multiple filters?
        self.stack_filters = config.getboolean('ec2', 'stack_filters')

        # Instance filters (see boto and EC2 API docs). Ignore invalid filters.
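        # For example (illustrative only), the ec2.ini setting
        #   instance_filters = instance-type=t1.micro&tag:env=stage,instance-type=m3.large
        # is parsed below into two filter dicts:
        #   [{'instance-type': 't1.micro', 'tag:env': 'stage'},
        #    {'instance-type': 'm3.large'}]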
self.ec2_instance_filters = [] if config.has_option('ec2', 'instance_filters'): filters = config.get('ec2', 'instance_filters') if self.stack_filters and '&' in filters: self.fail_with_error("AND filters along with stack_filter enabled is not supported.\n") filter_sets = [f for f in filters.split(',') if f] for filter_set in filter_sets: filters = {} filter_set = filter_set.strip() for instance_filter in filter_set.split("&"): instance_filter = instance_filter.strip() if not instance_filter or '=' not in instance_filter: continue filter_key, filter_value = [x.strip() for x in instance_filter.split('=', 1)] if not filter_key: continue filters[filter_key] = filter_value self.ec2_instance_filters.append(filters.copy()) def parse_cli_args(self): ''' Command line argument processing ''' parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on EC2') parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)') parser.add_argument('--host', action='store', help='Get all the variables about a specific instance') parser.add_argument('--refresh-cache', action='store_true', default=False, help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)') parser.add_argument('--profile', '--boto-profile', action='store', dest='boto_profile', help='Use boto profile for connections to EC2') self.args = parser.parse_args() def do_api_calls_update_cache(self): ''' Do API calls to each region, and save data in cache files ''' if self.route53_enabled: self.get_route53_records() for region in self.regions: self.get_instances_by_region(region) if self.rds_enabled: self.get_rds_instances_by_region(region) if self.elasticache_enabled: self.get_elasticache_clusters_by_region(region) self.get_elasticache_replication_groups_by_region(region) if self.include_rds_clusters: self.include_rds_clusters_by_region(region) self.write_to_cache(self.inventory, self.cache_path_cache) self.write_to_cache(self.index, self.cache_path_index) def connect(self, region): ''' create connection to api server''' if self.eucalyptus: conn = boto.connect_euca(host=self.eucalyptus_host, **self.credentials) conn.APIVersion = '2010-08-31' else: conn = self.connect_to_aws(ec2, region) return conn def boto_fix_security_token_in_profile(self, connect_args): ''' monkey patch for boto issue boto/boto#2100 ''' profile = 'profile ' + self.boto_profile if boto.config.has_option(profile, 'aws_security_token'): connect_args['security_token'] = boto.config.get(profile, 'aws_security_token') return connect_args def connect_to_aws(self, module, region): connect_args = self.credentials # only pass the profile name if it's set (as it is not supported by older boto versions) if self.boto_profile: connect_args['profile_name'] = self.boto_profile self.boto_fix_security_token_in_profile(connect_args) if self.iam_role: sts_conn = sts.connect_to_region(region, **connect_args) role = sts_conn.assume_role(self.iam_role, 'ansible_dynamic_inventory') connect_args['aws_access_key_id'] = role.credentials.access_key connect_args['aws_secret_access_key'] = role.credentials.secret_key connect_args['security_token'] = role.credentials.session_token conn = module.connect_to_region(region, **connect_args) # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported if conn is None: self.fail_with_error("region name: %s likely not supported, or AWS is down. connection to region failed." 
% region) return conn def get_instances_by_region(self, region): ''' Makes an AWS EC2 API call to the list of instances in a particular region ''' try: conn = self.connect(region) reservations = [] if self.ec2_instance_filters: if self.stack_filters: filters_dict = {} for filters in self.ec2_instance_filters: filters_dict.update(filters) reservations.extend(conn.get_all_instances(filters=filters_dict)) else: for filters in self.ec2_instance_filters: reservations.extend(conn.get_all_instances(filters=filters)) else: reservations = conn.get_all_instances() # Pull the tags back in a second step # AWS are on record as saying that the tags fetched in the first `get_all_instances` request are not # reliable and may be missing, and the only way to guarantee they are there is by calling `get_all_tags` instance_ids = [] for reservation in reservations: instance_ids.extend([instance.id for instance in reservation.instances]) max_filter_value = 199 tags = [] for i in range(0, len(instance_ids), max_filter_value): tags.extend(conn.get_all_tags(filters={'resource-type': 'instance', 'resource-id': instance_ids[i:i + max_filter_value]})) tags_by_instance_id = defaultdict(dict) for tag in tags: tags_by_instance_id[tag.res_id][tag.name] = tag.value if (not self.aws_account_id) and reservations: self.aws_account_id = reservations[0].owner_id for reservation in reservations: for instance in reservation.instances: instance.tags = tags_by_instance_id[instance.id] self.add_instance(instance, region) except boto.exception.BotoServerError as e: if e.error_code == 'AuthFailure': error = self.get_auth_error_message() else: backend = 'Eucalyptus' if self.eucalyptus else 'AWS' error = "Error connecting to %s backend.\n%s" % (backend, e.message) self.fail_with_error(error, 'getting EC2 instances') def tags_match_filters(self, tags): ''' return True if given tags match configured filters ''' if not self.ec2_instance_filters: return True for filters in self.ec2_instance_filters: for filter_name, filter_value in filters.items(): if filter_name[:4] != 'tag:': continue filter_name = filter_name[4:] if filter_name not in tags: if self.stack_filters: return False continue if isinstance(filter_value, list): if self.stack_filters and tags[filter_name] not in filter_value: return False if not self.stack_filters and tags[filter_name] in filter_value: return True if isinstance(filter_value, six.string_types): if self.stack_filters and tags[filter_name] != filter_value: return False if not self.stack_filters and tags[filter_name] == filter_value: return True return self.stack_filters def get_rds_instances_by_region(self, region): ''' Makes an AWS API call to the list of RDS instances in a particular region ''' if not HAS_BOTO3: self.fail_with_error("Working with RDS instances requires boto3 - please install boto3 and try again", "getting RDS instances") client = ec2_utils.boto3_inventory_conn('client', 'rds', region, **self.credentials) db_instances = client.describe_db_instances() try: conn = self.connect_to_aws(rds, region) if conn: marker = None while True: instances = conn.get_all_dbinstances(marker=marker) marker = instances.marker for index, instance in enumerate(instances): # Add tags to instances. 
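                        # (Illustrative note: boto3's list_tags_for_resource
                        # returns a TagList of {'Key': ..., 'Value': ...} dicts;
                        # the loop below flattens it into a plain instance.tags
                        # mapping, e.g. {'env': 'staging'}.)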
                        instance.arn = db_instances['DBInstances'][index]['DBInstanceArn']
                        tags = client.list_tags_for_resource(ResourceName=instance.arn)['TagList']

                        instance.tags = {}
                        for tag in tags:
                            instance.tags[tag['Key']] = tag['Value']

                        if self.tags_match_filters(instance.tags):
                            self.add_rds_instance(instance, region)
                    if not marker:
                        break
        except boto.exception.BotoServerError as e:
            error = e.reason

            if e.error_code == 'AuthFailure':
                error = self.get_auth_error_message()
            elif e.error_code == "OptInRequired":
                error = "RDS hasn't been enabled for this account yet. " \
                    "You must either log in to the RDS service through the AWS console to enable it, " \
                    "or set 'rds = False' in ec2.ini"
            elif not e.reason == "Forbidden":
                error = "Looks like AWS RDS is down:\n%s" % e.message
            self.fail_with_error(error, 'getting RDS instances')

    def include_rds_clusters_by_region(self, region):
        if not HAS_BOTO3:
            self.fail_with_error("Working with RDS clusters requires boto3 - please install boto3 and try again",
                                 "getting RDS clusters")

        client = ec2_utils.boto3_inventory_conn('client', 'rds', region, **self.credentials)

        marker, clusters = '', []
        while marker is not None:
            resp = client.describe_db_clusters(Marker=marker)
            clusters.extend(resp["DBClusters"])
            marker = resp.get('Marker', None)

        account_id = boto.connect_iam().get_user().arn.split(':')[4]
        c_dict = {}
        for c in clusters:
            # remove these datetime objects as there is no serialisation to json
            # currently in place and we don't need the data yet
            if 'EarliestRestorableTime' in c:
                del c['EarliestRestorableTime']
            if 'LatestRestorableTime' in c:
                del c['LatestRestorableTime']

            if not self.ec2_instance_filters:
                matches_filter = True
            else:
                matches_filter = False

            try:
                # arn:aws:rds:<region>:<account number>:<resourcetype>:<name>
                tags = client.list_tags_for_resource(
                    ResourceName='arn:aws:rds:' + region + ':' + account_id + ':cluster:' + c['DBClusterIdentifier'])
                c['Tags'] = tags['TagList']

                if self.ec2_instance_filters:
                    for filters in self.ec2_instance_filters:
                        for filter_key, filter_values in filters.items():
                            # get AWS tag key e.g. tag:env will be 'env'
                            tag_name = filter_key.split(":", 1)[1]
                            # Filter values is a list (if you put multiple values for the same tag name)
                            matches_filter = any(d['Key'] == tag_name and d['Value'] in filter_values for d in c['Tags'])

                            if matches_filter:
                                # it matches a filter, so stop looking for further matches
                                break

                        if matches_filter:
                            break

            except Exception as e:
                # str(e) rather than e.message, which does not exist on
                # generic exceptions under Python 3
                if 'DBInstanceNotFound' in str(e):
                    # AWS RDS bug (2016-01-06) means deletion does not fully complete and leaves an 'empty' cluster.
                    # Ignore errors when trying to find tags for these
                    pass

            # ignore empty clusters caused by AWS bug
            if len(c['DBClusterMembers']) == 0:
                continue
            elif matches_filter:
                c_dict[c['DBClusterIdentifier']] = c

        self.inventory['db_clusters'] = c_dict

    def get_elasticache_clusters_by_region(self, region):
        ''' Makes an AWS API call to the list of ElastiCache clusters (with
        nodes' info) in a particular region.'''

        # ElastiCache boto module doesn't provide a get_all_instances method,
        # that's why we need to call describe directly (it would be called by
        # the shorthand method anyway...)
        try:
            conn = self.connect_to_aws(elasticache, region)
            if conn:
                # show_cache_node_info = True
                # because we also want nodes' information
                response = conn.describe_cache_clusters(None, None, None, True)

        except boto.exception.BotoServerError as e:
            error = e.reason

            if e.error_code == 'AuthFailure':
                error = self.get_auth_error_message()
            elif e.error_code == "OptInRequired":
                error = "ElastiCache hasn't been enabled for this account yet.
" \ "You must either log in to the ElastiCache service through the AWS console to enable it, " \ "or set 'elasticache = False' in ec2.ini" elif not e.reason == "Forbidden": error = "Looks like AWS ElastiCache is down:\n%s" % e.message self.fail_with_error(error, 'getting ElastiCache clusters') try: # Boto also doesn't provide wrapper classes to CacheClusters or # CacheNodes. Because of that we can't make use of the get_list # method in the AWSQueryConnection. Let's do the work manually clusters = response['DescribeCacheClustersResponse']['DescribeCacheClustersResult']['CacheClusters'] except KeyError as e: error = "ElastiCache query to AWS failed (unexpected format)." self.fail_with_error(error, 'getting ElastiCache clusters') for cluster in clusters: self.add_elasticache_cluster(cluster, region) def get_elasticache_replication_groups_by_region(self, region): ''' Makes an AWS API call to the list of ElastiCache replication groups in a particular region.''' # ElastiCache boto module doesn't provide a get_all_instances method, # that's why we need to call describe directly (it would be called by # the shorthand method anyway...) try: conn = self.connect_to_aws(elasticache, region) if conn: response = conn.describe_replication_groups() except boto.exception.BotoServerError as e: error = e.reason if e.error_code == 'AuthFailure': error = self.get_auth_error_message() if not e.reason == "Forbidden": error = "Looks like AWS ElastiCache [Replication Groups] is down:\n%s" % e.message self.fail_with_error(error, 'getting ElastiCache clusters') try: # Boto also doesn't provide wrapper classes to ReplicationGroups # Because of that we can't make use of the get_list method in the # AWSQueryConnection. Let's do the work manually replication_groups = response['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult']['ReplicationGroups'] except KeyError as e: error = "ElastiCache [Replication Groups] query to AWS failed (unexpected format)." 
self.fail_with_error(error, 'getting ElastiCache clusters') for replication_group in replication_groups: self.add_elasticache_replication_group(replication_group, region) def get_auth_error_message(self): ''' create an informative error message if there is an issue authenticating''' errors = ["Authentication error retrieving ec2 inventory."] if None in [os.environ.get('AWS_ACCESS_KEY_ID'), os.environ.get('AWS_SECRET_ACCESS_KEY')]: errors.append(' - No AWS_ACCESS_KEY_ID or AWS_SECRET_ACCESS_KEY environment vars found') else: errors.append(' - AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment vars found but may not be correct') boto_paths = ['/etc/boto.cfg', '~/.boto', '~/.aws/credentials'] boto_config_found = [p for p in boto_paths if os.path.isfile(os.path.expanduser(p))] if len(boto_config_found) > 0: errors.append(" - Boto configs found at '%s', but the credentials contained may not be correct" % ', '.join(boto_config_found)) else: errors.append(" - No Boto config found at any expected location '%s'" % ', '.join(boto_paths)) return '\n'.join(errors) def fail_with_error(self, err_msg, err_operation=None): '''log an error to std err for ansible-playbook to consume and exit''' if err_operation: err_msg = 'ERROR: "{err_msg}", while: {err_operation}'.format( err_msg=err_msg, err_operation=err_operation) sys.stderr.write(err_msg) sys.exit(1) def get_instance(self, region, instance_id): conn = self.connect(region) reservations = conn.get_all_instances([instance_id]) for reservation in reservations: for instance in reservation.instances: return instance def add_instance(self, instance, region): ''' Adds an instance to the inventory and index, as long as it is addressable ''' # Only return instances with desired instance states if instance.state not in self.ec2_instance_states: return # Select the best destination address # When destination_format and destination_format_tags are specified # the following code will attempt to find the instance tags first, # then the instance attributes next, and finally if neither are found # assign nil for the desired destination format attribute. if self.destination_format and self.destination_format_tags: dest_vars = [] inst_tags = getattr(instance, 'tags') for tag in self.destination_format_tags: if tag in inst_tags: dest_vars.append(inst_tags[tag]) elif hasattr(instance, tag): dest_vars.append(getattr(instance, tag)) else: dest_vars.append('nil') dest = self.destination_format.format(*dest_vars) elif instance.subnet_id: dest = getattr(instance, self.vpc_destination_variable, None) if dest is None: dest = getattr(instance, 'tags').get(self.vpc_destination_variable, None) else: dest = getattr(instance, self.destination_variable, None) if dest is None: dest = getattr(instance, 'tags').get(self.destination_variable, None) if not dest: # Skip instances we cannot address (e.g. 
private VPC subnet) return # Set the inventory name hostname = None if self.hostname_variable: if self.hostname_variable.startswith('tag_'): hostname = instance.tags.get(self.hostname_variable[4:], None) else: hostname = getattr(instance, self.hostname_variable) # set the hostname from route53 if self.route53_enabled and self.route53_hostnames: route53_names = self.get_instance_route53_names(instance) for name in route53_names: if name.endswith(self.route53_hostnames): hostname = name # If we can't get a nice hostname, use the destination address if not hostname: hostname = dest # to_safe strips hostname characters like dots, so don't strip route53 hostnames elif self.route53_enabled and self.route53_hostnames and hostname.endswith(self.route53_hostnames): hostname = hostname.lower() else: hostname = self.to_safe(hostname).lower() # if we only want to include hosts that match a pattern, skip those that don't if self.pattern_include and not self.pattern_include.match(hostname): return # if we need to exclude hosts that match a pattern, skip those if self.pattern_exclude and self.pattern_exclude.match(hostname): return # Add to index self.index[hostname] = [region, instance.id] # Inventory: Group by instance ID (always a group of 1) if self.group_by_instance_id: self.inventory[instance.id] = [hostname] if self.nested_groups: self.push_group(self.inventory, 'instances', instance.id) # Inventory: Group by region if self.group_by_region: self.push(self.inventory, region, hostname) if self.nested_groups: self.push_group(self.inventory, 'regions', region) # Inventory: Group by availability zone if self.group_by_availability_zone: self.push(self.inventory, instance.placement, hostname) if self.nested_groups: if self.group_by_region: self.push_group(self.inventory, region, instance.placement) self.push_group(self.inventory, 'zones', instance.placement) # Inventory: Group by Amazon Machine Image (AMI) ID if self.group_by_ami_id: ami_id = self.to_safe(instance.image_id) self.push(self.inventory, ami_id, hostname) if self.nested_groups: self.push_group(self.inventory, 'images', ami_id) # Inventory: Group by instance type if self.group_by_instance_type: type_name = self.to_safe('type_' + instance.instance_type) self.push(self.inventory, type_name, hostname) if self.nested_groups: self.push_group(self.inventory, 'types', type_name) # Inventory: Group by instance state if self.group_by_instance_state: state_name = self.to_safe('instance_state_' + instance.state) self.push(self.inventory, state_name, hostname) if self.nested_groups: self.push_group(self.inventory, 'instance_states', state_name) # Inventory: Group by platform if self.group_by_platform: if instance.platform: platform = self.to_safe('platform_' + instance.platform) else: platform = self.to_safe('platform_undefined') self.push(self.inventory, platform, hostname) if self.nested_groups: self.push_group(self.inventory, 'platforms', platform) # Inventory: Group by key pair if self.group_by_key_pair and instance.key_name: key_name = self.to_safe('key_' + instance.key_name) self.push(self.inventory, key_name, hostname) if self.nested_groups: self.push_group(self.inventory, 'keys', key_name) # Inventory: Group by VPC if self.group_by_vpc_id and instance.vpc_id: vpc_id_name = self.to_safe('vpc_id_' + instance.vpc_id) self.push(self.inventory, vpc_id_name, hostname) if self.nested_groups: self.push_group(self.inventory, 'vpcs', vpc_id_name) # Inventory: Group by security group if self.group_by_security_group: try: for group in instance.groups: key = 
self.to_safe("security_group_" + group.name) self.push(self.inventory, key, hostname) if self.nested_groups: self.push_group(self.inventory, 'security_groups', key) except AttributeError: self.fail_with_error('\n'.join(['Package boto seems a bit older.', 'Please upgrade boto >= 2.3.0.'])) # Inventory: Group by AWS account ID if self.group_by_aws_account: self.push(self.inventory, self.aws_account_id, hostname) if self.nested_groups: self.push_group(self.inventory, 'accounts', self.aws_account_id) # Inventory: Group by tag keys if self.group_by_tag_keys: for k, v in instance.tags.items(): if self.expand_csv_tags and v and ',' in v: values = map(lambda x: x.strip(), v.split(',')) else: values = [v] for v in values: if v: key = self.to_safe("tag_" + k + "=" + v) else: key = self.to_safe("tag_" + k) self.push(self.inventory, key, hostname) if self.nested_groups: self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k)) if v: self.push_group(self.inventory, self.to_safe("tag_" + k), key) # Inventory: Group by Route53 domain names if enabled if self.route53_enabled and self.group_by_route53_names: route53_names = self.get_instance_route53_names(instance) for name in route53_names: self.push(self.inventory, name, hostname) if self.nested_groups: self.push_group(self.inventory, 'route53', name) # Global Tag: instances without tags if self.group_by_tag_none and len(instance.tags) == 0: self.push(self.inventory, 'tag_none', hostname) if self.nested_groups: self.push_group(self.inventory, 'tags', 'tag_none') # Global Tag: tag all EC2 instances self.push(self.inventory, 'ec2', hostname) self.inventory["_meta"]["hostvars"][hostname] = self.get_host_info_dict_from_instance(instance) self.inventory["_meta"]["hostvars"][hostname]['ansible_host'] = dest def add_rds_instance(self, instance, region): ''' Adds an RDS instance to the inventory and index, as long as it is addressable ''' # Only want available instances unless all_rds_instances is True if not self.all_rds_instances and instance.status != 'available': return # Select the best destination address dest = instance.endpoint[0] if not dest: # Skip instances we cannot address (e.g. 
private VPC subnet) return # Set the inventory name hostname = None if self.hostname_variable: if self.hostname_variable.startswith('tag_'): hostname = instance.tags.get(self.hostname_variable[4:], None) else: hostname = getattr(instance, self.hostname_variable) # If we can't get a nice hostname, use the destination address if not hostname: hostname = dest hostname = self.to_safe(hostname).lower() # Add to index self.index[hostname] = [region, instance.id] # Inventory: Group by instance ID (always a group of 1) if self.group_by_instance_id: self.inventory[instance.id] = [hostname] if self.nested_groups: self.push_group(self.inventory, 'instances', instance.id) # Inventory: Group by region if self.group_by_region: self.push(self.inventory, region, hostname) if self.nested_groups: self.push_group(self.inventory, 'regions', region) # Inventory: Group by availability zone if self.group_by_availability_zone: self.push(self.inventory, instance.availability_zone, hostname) if self.nested_groups: if self.group_by_region: self.push_group(self.inventory, region, instance.availability_zone) self.push_group(self.inventory, 'zones', instance.availability_zone) # Inventory: Group by instance type if self.group_by_instance_type: type_name = self.to_safe('type_' + instance.instance_class) self.push(self.inventory, type_name, hostname) if self.nested_groups: self.push_group(self.inventory, 'types', type_name) # Inventory: Group by VPC if self.group_by_vpc_id and instance.subnet_group and instance.subnet_group.vpc_id: vpc_id_name = self.to_safe('vpc_id_' + instance.subnet_group.vpc_id) self.push(self.inventory, vpc_id_name, hostname) if self.nested_groups: self.push_group(self.inventory, 'vpcs', vpc_id_name) # Inventory: Group by security group if self.group_by_security_group: try: if instance.security_group: key = self.to_safe("security_group_" + instance.security_group.name) self.push(self.inventory, key, hostname) if self.nested_groups: self.push_group(self.inventory, 'security_groups', key) except AttributeError: self.fail_with_error('\n'.join(['Package boto seems a bit older.', 'Please upgrade boto >= 2.3.0.'])) # Inventory: Group by tag keys if self.group_by_tag_keys: for k, v in instance.tags.items(): if self.expand_csv_tags and v and ',' in v: values = map(lambda x: x.strip(), v.split(',')) else: values = [v] for v in values: if v: key = self.to_safe("tag_" + k + "=" + v) else: key = self.to_safe("tag_" + k) self.push(self.inventory, key, hostname) if self.nested_groups: self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k)) if v: self.push_group(self.inventory, self.to_safe("tag_" + k), key) # Inventory: Group by engine if self.group_by_rds_engine: self.push(self.inventory, self.to_safe("rds_" + instance.engine), hostname) if self.nested_groups: self.push_group(self.inventory, 'rds_engines', self.to_safe("rds_" + instance.engine)) # Inventory: Group by parameter group if self.group_by_rds_parameter_group: self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), hostname) if self.nested_groups: self.push_group(self.inventory, 'rds_parameter_groups', self.to_safe("rds_parameter_group_" + instance.parameter_group.name)) # Global Tag: instances without tags if self.group_by_tag_none and len(instance.tags) == 0: self.push(self.inventory, 'tag_none', hostname) if self.nested_groups: self.push_group(self.inventory, 'tags', 'tag_none') # Global Tag: all RDS instances self.push(self.inventory, 'rds', hostname) self.inventory["_meta"]["hostvars"][hostname] = 
self.get_host_info_dict_from_instance(instance)
        self.inventory["_meta"]["hostvars"][hostname]['ansible_host'] = dest

    def add_elasticache_cluster(self, cluster, region):
        ''' Adds an ElastiCache cluster to the inventory and index, as long as
        its nodes are addressable '''

        # Only want available clusters unless all_elasticache_clusters is True
        if not self.all_elasticache_clusters and cluster['CacheClusterStatus'] != 'available':
            return

        # Select the best destination address
        if 'ConfigurationEndpoint' in cluster and cluster['ConfigurationEndpoint']:
            # Memcached cluster
            dest = cluster['ConfigurationEndpoint']['Address']
            is_redis = False
        else:
            # Redis single node cluster
            # Because all Redis clusters are single nodes, we'll merge the
            # info from the cluster with info about the node
            dest = cluster['CacheNodes'][0]['Endpoint']['Address']
            is_redis = True

        if not dest:
            # Skip clusters we cannot address (e.g. private VPC subnet)
            return

        # Add to index
        self.index[dest] = [region, cluster['CacheClusterId']]

        # Inventory: Group by instance ID (always a group of 1)
        if self.group_by_instance_id:
            self.inventory[cluster['CacheClusterId']] = [dest]
            if self.nested_groups:
                self.push_group(self.inventory, 'instances', cluster['CacheClusterId'])

        # Inventory: Group by region
        if self.group_by_region and not is_redis:
            self.push(self.inventory, region, dest)
            if self.nested_groups:
                self.push_group(self.inventory, 'regions', region)

        # Inventory: Group by availability zone
        if self.group_by_availability_zone and not is_redis:
            self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest)
            if self.nested_groups:
                if self.group_by_region:
                    self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone'])
                self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone'])

        # Inventory: Group by node type
        if self.group_by_instance_type and not is_redis:
            type_name = self.to_safe('type_' + cluster['CacheNodeType'])
            self.push(self.inventory, type_name, dest)
            if self.nested_groups:
                self.push_group(self.inventory, 'types', type_name)

        # Inventory: Group by VPC (information not available in the current
        #   AWS API version for ElastiCache)

        # Inventory: Group by security group
        if self.group_by_security_group and not is_redis:

            # Check for the existence of the 'SecurityGroups' key and also if
            # this key has some value. When the cluster is not placed in a SG
            # the query can return None here and cause an error.
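            # Illustrative shape, assumed from the keys accessed below (not
            # quoted from the AWS docs):
            #   cluster['SecurityGroups'] == [{'SecurityGroupId': 'sg-0123abcd', 'Status': 'active'}]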
if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None: for security_group in cluster['SecurityGroups']: key = self.to_safe("security_group_" + security_group['SecurityGroupId']) self.push(self.inventory, key, dest) if self.nested_groups: self.push_group(self.inventory, 'security_groups', key) # Inventory: Group by engine if self.group_by_elasticache_engine and not is_redis: self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest) if self.nested_groups: self.push_group(self.inventory, 'elasticache_engines', self.to_safe(cluster['Engine'])) # Inventory: Group by parameter group if self.group_by_elasticache_parameter_group: self.push(self.inventory, self.to_safe("elasticache_parameter_group_" + cluster['CacheParameterGroup']['CacheParameterGroupName']), dest) if self.nested_groups: self.push_group(self.inventory, 'elasticache_parameter_groups', self.to_safe(cluster['CacheParameterGroup']['CacheParameterGroupName'])) # Inventory: Group by replication group if self.group_by_elasticache_replication_group and 'ReplicationGroupId' in cluster and cluster['ReplicationGroupId']: self.push(self.inventory, self.to_safe("elasticache_replication_group_" + cluster['ReplicationGroupId']), dest) if self.nested_groups: self.push_group(self.inventory, 'elasticache_replication_groups', self.to_safe(cluster['ReplicationGroupId'])) # Global Tag: all ElastiCache clusters self.push(self.inventory, 'elasticache_clusters', cluster['CacheClusterId']) host_info = self.get_host_info_dict_from_describe_dict(cluster) self.inventory["_meta"]["hostvars"][dest] = host_info # Add the nodes for node in cluster['CacheNodes']: self.add_elasticache_node(node, cluster, region) def add_elasticache_node(self, node, cluster, region): ''' Adds an ElastiCache node to the inventory and index, as long as it is addressable ''' # Only want available nodes unless all_elasticache_nodes is True if not self.all_elasticache_nodes and node['CacheNodeStatus'] != 'available': return # Select the best destination address dest = node['Endpoint']['Address'] if not dest: # Skip nodes we cannot address (e.g. private VPC subnet) return node_id = self.to_safe(cluster['CacheClusterId'] + '_' + node['CacheNodeId']) # Add to index self.index[dest] = [region, node_id] # Inventory: Group by node ID (always a group of 1) if self.group_by_instance_id: self.inventory[node_id] = [dest] if self.nested_groups: self.push_group(self.inventory, 'instances', node_id) # Inventory: Group by region if self.group_by_region: self.push(self.inventory, region, dest) if self.nested_groups: self.push_group(self.inventory, 'regions', region) # Inventory: Group by availability zone if self.group_by_availability_zone: self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest) if self.nested_groups: if self.group_by_region: self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone']) self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone']) # Inventory: Group by node type if self.group_by_instance_type: type_name = self.to_safe('type_' + cluster['CacheNodeType']) self.push(self.inventory, type_name, dest) if self.nested_groups: self.push_group(self.inventory, 'types', type_name) # Inventory: Group by VPC (information not available in the current # AWS API version for ElastiCache) # Inventory: Group by security group if self.group_by_security_group: # Check for the existence of the 'SecurityGroups' key and also if # this key has some value. 
When the cluster is not placed in a SG # the query can return None here and cause an error. if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None: for security_group in cluster['SecurityGroups']: key = self.to_safe("security_group_" + security_group['SecurityGroupId']) self.push(self.inventory, key, dest) if self.nested_groups: self.push_group(self.inventory, 'security_groups', key) # Inventory: Group by engine if self.group_by_elasticache_engine: self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest) if self.nested_groups: self.push_group(self.inventory, 'elasticache_engines', self.to_safe("elasticache_" + cluster['Engine'])) # Inventory: Group by parameter group (done at cluster level) # Inventory: Group by replication group (done at cluster level) # Inventory: Group by ElastiCache Cluster if self.group_by_elasticache_cluster: self.push(self.inventory, self.to_safe("elasticache_cluster_" + cluster['CacheClusterId']), dest) # Global Tag: all ElastiCache nodes self.push(self.inventory, 'elasticache_nodes', dest) host_info = self.get_host_info_dict_from_describe_dict(node) if dest in self.inventory["_meta"]["hostvars"]: self.inventory["_meta"]["hostvars"][dest].update(host_info) else: self.inventory["_meta"]["hostvars"][dest] = host_info def add_elasticache_replication_group(self, replication_group, region): ''' Adds an ElastiCache replication group to the inventory and index ''' # Only want available clusters unless all_elasticache_replication_groups is True if not self.all_elasticache_replication_groups and replication_group['Status'] != 'available': return # Skip clusters we cannot address (e.g. private VPC subnet or clustered redis) if replication_group['NodeGroups'][0]['PrimaryEndpoint'] is None or \ replication_group['NodeGroups'][0]['PrimaryEndpoint']['Address'] is None: return # Select the best destination address (PrimaryEndpoint) dest = replication_group['NodeGroups'][0]['PrimaryEndpoint']['Address'] # Add to index self.index[dest] = [region, replication_group['ReplicationGroupId']] # Inventory: Group by ID (always a group of 1) if self.group_by_instance_id: self.inventory[replication_group['ReplicationGroupId']] = [dest] if self.nested_groups: self.push_group(self.inventory, 'instances', replication_group['ReplicationGroupId']) # Inventory: Group by region if self.group_by_region: self.push(self.inventory, region, dest) if self.nested_groups: self.push_group(self.inventory, 'regions', region) # Inventory: Group by availability zone (doesn't apply to replication groups) # Inventory: Group by node type (doesn't apply to replication groups) # Inventory: Group by VPC (information not available in the current # AWS API version for replication groups # Inventory: Group by security group (doesn't apply to replication groups) # Check this value in cluster level # Inventory: Group by engine (replication groups are always Redis) if self.group_by_elasticache_engine: self.push(self.inventory, 'elasticache_redis', dest) if self.nested_groups: self.push_group(self.inventory, 'elasticache_engines', 'redis') # Global Tag: all ElastiCache clusters self.push(self.inventory, 'elasticache_replication_groups', replication_group['ReplicationGroupId']) host_info = self.get_host_info_dict_from_describe_dict(replication_group) self.inventory["_meta"]["hostvars"][dest] = host_info def get_route53_records(self): ''' Get and store the map of resource records to domain names that point to them. 
''' if self.boto_profile: r53_conn = route53.Route53Connection(profile_name=self.boto_profile) else: r53_conn = route53.Route53Connection() all_zones = r53_conn.get_zones() route53_zones = [zone for zone in all_zones if zone.name[:-1] not in self.route53_excluded_zones] self.route53_records = {} for zone in route53_zones: rrsets = r53_conn.get_all_rrsets(zone.id) for record_set in rrsets: record_name = record_set.name if record_name.endswith('.'): record_name = record_name[:-1] for resource in record_set.resource_records: self.route53_records.setdefault(resource, set()) self.route53_records[resource].add(record_name) def get_instance_route53_names(self, instance): ''' Check if an instance is referenced in the records we have from Route53. If it is, return the list of domain names pointing to said instance. If nothing points to it, return an empty list. ''' instance_attributes = ['public_dns_name', 'private_dns_name', 'ip_address', 'private_ip_address'] name_list = set() for attrib in instance_attributes: try: value = getattr(instance, attrib) except AttributeError: continue if value in self.route53_records: name_list.update(self.route53_records[value]) return list(name_list) def get_host_info_dict_from_instance(self, instance): instance_vars = {} for key in vars(instance): value = getattr(instance, key) key = self.to_safe('ec2_' + key) # Handle complex types # state/previous_state changed to properties in boto in https://github.com/boto/boto/commit/a23c379837f698212252720d2af8dec0325c9518 if key == 'ec2__state': instance_vars['ec2_state'] = instance.state or '' instance_vars['ec2_state_code'] = instance.state_code elif key == 'ec2__previous_state': instance_vars['ec2_previous_state'] = instance.previous_state or '' instance_vars['ec2_previous_state_code'] = instance.previous_state_code elif isinstance(value, (int, bool)): instance_vars[key] = value elif isinstance(value, six.string_types): instance_vars[key] = value.strip() elif value is None: instance_vars[key] = '' elif key == 'ec2_region': instance_vars[key] = value.name elif key == 'ec2__placement': instance_vars['ec2_placement'] = value.zone elif key == 'ec2_tags': for k, v in value.items(): if self.expand_csv_tags and ',' in v: v = list(map(lambda x: x.strip(), v.split(','))) key = self.to_safe('ec2_tag_' + k) instance_vars[key] = v elif key == 'ec2_groups': group_ids = [] group_names = [] for group in value: group_ids.append(group.id) group_names.append(group.name) instance_vars["ec2_security_group_ids"] = ','.join([str(i) for i in group_ids]) instance_vars["ec2_security_group_names"] = ','.join([str(i) for i in group_names]) elif key == 'ec2_block_device_mapping': instance_vars["ec2_block_devices"] = {} for k, v in value.items(): instance_vars["ec2_block_devices"][os.path.basename(k)] = v.volume_id else: pass # TODO Product codes if someone finds them useful # print key # print type(value) # print value instance_vars[self.to_safe('ec2_account_id')] = self.aws_account_id return instance_vars def get_host_info_dict_from_describe_dict(self, describe_dict): ''' Parses the dictionary returned by the API call into a flat list of parameters. This method should be used only when 'describe' is used directly because Boto doesn't provide specific classes. ''' # I really don't agree with prefixing everything with 'ec2' # because EC2, RDS and ElastiCache are different services. # I'm just following the pattern used until now to not break any # compatibility. 
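        # e.g. (illustrative) uncammelize() plus the 'ec2_' prefix below turn
        # 'CacheNodeType' into 'ec2_cache_node_type' and
        # 'ConfigurationEndpoint' into 'ec2_configuration_endpoint'.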
host_info = {} for key in describe_dict: value = describe_dict[key] key = self.to_safe('ec2_' + self.uncammelize(key)) # Handle complex types # Target: Memcached Cache Clusters if key == 'ec2_configuration_endpoint' and value: host_info['ec2_configuration_endpoint_address'] = value['Address'] host_info['ec2_configuration_endpoint_port'] = value['Port'] # Target: Cache Nodes and Redis Cache Clusters (single node) if key == 'ec2_endpoint' and value: host_info['ec2_endpoint_address'] = value['Address'] host_info['ec2_endpoint_port'] = value['Port'] # Target: Redis Replication Groups if key == 'ec2_node_groups' and value: host_info['ec2_endpoint_address'] = value[0]['PrimaryEndpoint']['Address'] host_info['ec2_endpoint_port'] = value[0]['PrimaryEndpoint']['Port'] replica_count = 0 for node in value[0]['NodeGroupMembers']: if node['CurrentRole'] == 'primary': host_info['ec2_primary_cluster_address'] = node['ReadEndpoint']['Address'] host_info['ec2_primary_cluster_port'] = node['ReadEndpoint']['Port'] host_info['ec2_primary_cluster_id'] = node['CacheClusterId'] elif node['CurrentRole'] == 'replica': host_info['ec2_replica_cluster_address_' + str(replica_count)] = node['ReadEndpoint']['Address'] host_info['ec2_replica_cluster_port_' + str(replica_count)] = node['ReadEndpoint']['Port'] host_info['ec2_replica_cluster_id_' + str(replica_count)] = node['CacheClusterId'] replica_count += 1 # Target: Redis Replication Groups if key == 'ec2_member_clusters' and value: host_info['ec2_member_clusters'] = ','.join([str(i) for i in value]) # Target: All Cache Clusters elif key == 'ec2_cache_parameter_group': host_info["ec2_cache_node_ids_to_reboot"] = ','.join([str(i) for i in value['CacheNodeIdsToReboot']]) host_info['ec2_cache_parameter_group_name'] = value['CacheParameterGroupName'] host_info['ec2_cache_parameter_apply_status'] = value['ParameterApplyStatus'] # Target: Almost everything elif key == 'ec2_security_groups': # Skip if SecurityGroups is None # (it is possible to have the key defined but no value in it). if value is not None: sg_ids = [] for sg in value: sg_ids.append(sg['SecurityGroupId']) host_info["ec2_security_group_ids"] = ','.join([str(i) for i in sg_ids]) # Target: Everything # Preserve booleans and integers elif isinstance(value, (int, bool)): host_info[key] = value # Target: Everything # Sanitize string values elif isinstance(value, six.string_types): host_info[key] = value.strip() # Target: Everything # Replace None by an empty string elif value is None: host_info[key] = '' else: # Remove non-processed complex types pass return host_info def get_host_info(self): ''' Get variables about a specific host ''' if len(self.index) == 0: # Need to load index from cache self.load_index_from_cache() if self.args.host not in self.index: # try updating the cache self.do_api_calls_update_cache() if self.args.host not in self.index: # host might not exist anymore return self.json_format_dict({}, True) (region, instance_id) = self.index[self.args.host] instance = self.get_instance(region, instance_id) return self.json_format_dict(self.get_host_info_dict_from_instance(instance), True) def push(self, my_dict, key, element): ''' Push an element onto an array that may not have been defined in the dict ''' group_info = my_dict.setdefault(key, []) if isinstance(group_info, dict): host_list = group_info.setdefault('hosts', []) host_list.append(element) else: group_info.append(element) def push_group(self, my_dict, key, element): ''' Push a group as a child of another group. 
'''
        parent_group = my_dict.setdefault(key, {})
        if not isinstance(parent_group, dict):
            parent_group = my_dict[key] = {'hosts': parent_group}
        child_groups = parent_group.setdefault('children', [])
        if element not in child_groups:
            child_groups.append(element)

    def get_inventory_from_cache(self):
        ''' Reads the inventory from the cache file and returns it as a JSON object '''

        with open(self.cache_path_cache, 'r') as f:
            json_inventory = f.read()
            return json_inventory

    def load_index_from_cache(self):
        ''' Reads the index from the cache file and sets self.index '''

        with open(self.cache_path_index, 'rb') as f:
            self.index = json.load(f)

    def write_to_cache(self, data, filename):
        ''' Writes data in JSON format to a file '''

        json_data = self.json_format_dict(data, True)
        with open(filename, 'w') as f:
            f.write(json_data)

    def uncammelize(self, key):
        temp = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', key)
        return re.sub('([a-z0-9])([A-Z])', r'\1_\2', temp).lower()

    def to_safe(self, word):
        ''' Converts 'bad' characters in a string to underscores
        so they can be used as Ansible groups '''
        regex = r"[^A-Za-z0-9\_"
        if not self.replace_dash_in_groups:
            regex += r"\-"
        return re.sub(regex + "]", "_", word)

    def json_format_dict(self, data, pretty=False):
        ''' Converts a dict to a JSON object and dumps it as a formatted
        string '''

        if pretty:
            return json.dumps(data, sort_keys=True, indent=2)
        else:
            return json.dumps(data)


if __name__ == '__main__':
    # Run the script
    Ec2Inventory()
ansible-2.5.1/contrib/inventory/fleet.py0000755000000000000000000000577013265756155020273 0ustar rootroot00000000000000
#!/usr/bin/env python
"""
fleetctl base external inventory script. Automatically finds the IPs of the
booted coreos instances and returns them under the host group 'coreos'
"""

# Copyright (C) 2014 Andrew Rothstein
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Thanks to the vagrant.py inventory script for giving me the basic structure
# of this.
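# An illustrative sketch of what `fleetctl list-machines` prints (the machine
# id and IP below are made up; column widths vary):
#
#     MACHINE        IP             METADATA
#     113f16a7...    172.17.8.101   -
#
# list_running_boxes() below extracts the IP column and skips the header row.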
# import sys import subprocess import re from optparse import OptionParser try: import json except ImportError: import simplejson as json # Options # ------------------------------ parser = OptionParser(usage="%prog [options] --list | --host <machine>") parser.add_option('--list', default=False, dest="list", action="store_true", help="Produce a JSON consumable grouping of servers in your fleet") parser.add_option('--host', default=None, dest="host", help="Generate additional host specific details for given host for Ansible") (options, args) = parser.parse_args() # # helper functions # def get_ssh_config(): configs = [] for box in list_running_boxes(): config = get_a_ssh_config(box) configs.append(config) return configs # list all the running instances in the fleet def list_running_boxes(): boxes = [] for line in subprocess.check_output(["fleetctl", "list-machines"], universal_newlines=True).split('\n'): matcher = re.search(r"[^\s]+[\s]+([^\s]+).+", line) if matcher and matcher.group(1) != "IP": boxes.append(matcher.group(1)) return boxes def get_a_ssh_config(box_name): config = {} config['Host'] = box_name config['ansible_ssh_user'] = 'core' config['ansible_python_interpreter'] = '/opt/bin/python' return config # List out servers that fleet has running # ------------------------------ if options.list: ssh_config = get_ssh_config() hosts = {'coreos': []} for data in ssh_config: hosts['coreos'].append(data['Host']) print(json.dumps(hosts)) sys.exit(0) # Get out the host details # ------------------------------ elif options.host: result = {} ssh_config = get_ssh_config() details = [x for x in ssh_config if x['Host'] == options.host] if len(details) > 0: # pass through the port, in case it's non standard. result = details[0] print(json.dumps(result)) sys.exit(0) # Print out help # ------------------------------ else: parser.print_help() sys.exit(1) ansible-2.5.1/contrib/inventory/foreman.ini0000644000000000000000000001234013265756155020736 0ustar rootroot00000000000000# Foreman inventory (https://github.com/theforeman/foreman_ansible_inventory) # # This script can be used as an Ansible dynamic inventory. # The connection parameters are set up via *foreman.ini* # This is how the script finds the configuration file, in # order of discovery. # # * `/etc/ansible/foreman.ini` # * Current directory of your inventory script. # * `FOREMAN_INI_PATH` environment variable. # # ## Variables and Parameters # # The data returned from Foreman for each host is stored in a foreman # hash so they're available as *host_vars* along with the parameters # of the host and its hostgroups: # # "foo.example.com": { # "foreman": { # "architecture_id": 1, # "architecture_name": "x86_64", # "build": false, # "build_status": 0, # "build_status_label": "Installed", # "capabilities": [ # "build", # "image" # ], # "compute_profile_id": 4, # "hostgroup_name": "webtier/myapp", # "id": 70, # "image_name": "debian8.1", # ... # "uuid": "50197c10-5ebb-b5cf-b384-a1e203e19e77" # }, # "foreman_params": { # "testparam1": "foobar", # "testparam2": "small", # ... # } # # and could therefore be used in Ansible like: # # - debug: msg="From Foreman host {{ foreman['uuid'] }}" # # Which yields # # TASK [test_foreman : debug] **************************************************** # ok: [foo.example.com] => { # "msg": "From Foreman host 50190bd1-052a-a34a-3c9c-df37a39550bf" # } # # ## Automatic Ansible groups # # The inventory will provide a set of groups, by default prefixed by # 'foreman_'.
If you want to customize this prefix, change the # group_prefix option in /etc/ansible/foreman.ini. The rest of this # guide will assume the default prefix of 'foreman_' # # The hostgroup, location, organization, content view, and lifecycle # environment of each host are created as Ansible groups with a # foreman_ prefix, all lowercase and problematic parameters # removed. So e.g. the foreman hostgroup # # myapp / webtier / datacenter1 # # would turn into the Ansible group: # # foreman_hostgroup_myapp_webtier_datacenter1 # # If the parameter want_hostcollections is set to true, the # collections each host is in are created as Ansible groups with a # foreman_hostcollection prefix, all lowercase and problematic # parameters removed. So e.g. the Foreman host collection # # Patch Window Thursday # # would turn into the Ansible group: # # foreman_hostcollection_patchwindowthursday # # If the parameter host_filters is set, it will be used as the # "search" parameter for the /api/v2/hosts call. This can be used to # restrict the list of returned hosts, as shown below. # # Furthermore, Ansible groups can be created on the fly using the # *group_patterns* variable in *foreman.ini* so that you can build up # hierarchies using parameters on the hostgroup and host variables. # # Let's assume you have a host that is built using this nested hostgroup: # # myapp / webtier / datacenter1 # # and each of the hostgroups defines a parameter, respectively: # # myapp: app_param = myapp # webtier: tier_param = webtier # datacenter1: dc_param = datacenter1 # # The host is also in a subnet called "mysubnet" and provisioned via an image; # then *group_patterns* like: # # [ansible] # group_patterns = ["{app_param}-{tier_param}-{dc_param}", # "{app_param}-{tier_param}", # "{app_param}", # "{subnet_name}-{provision_method}"] # # would put the host into the additional Ansible groups: # # - myapp-webtier-datacenter1 # - myapp-webtier # - myapp # - mysubnet-image # # by recursively resolving the hostgroups, getting the parameter keys # and values and doing a Python *string.format()*-style replacement on # them. # [foreman] url = http://localhost:3000/ user = foreman password = secret ssl_verify = True # Retrieve only hosts from the organization "Web Engineering". # host_filters = organization="Web Engineering" # Retrieve only hosts from the organization "Web Engineering" that are # also in the host collection "Apache Servers". # host_filters = organization="Web Engineering" and host_collection="Apache Servers" [ansible] group_patterns = ["{app}-{tier}-{color}", "{app}-{color}", "{app}", "{tier}"] group_prefix = foreman_ # Whether to fetch facts from Foreman and store them on the host want_facts = True # Whether to create Ansible groups for host collections. Only tested # with Katello (Red Hat Satellite). Disabled by default to not break # the script for stand-alone Foreman. want_hostcollections = False # Whether to interpret global parameter values as JSON (if possible, else # take them as is). Only tested with Katello (Red Hat Satellite). # This allows defining list and dictionary (and more complicated) # variables by entering them as JSON strings in Foreman parameters. # Disabled by default as the change would otherwise not be backward compatible. rich_params = False [cache] path = .
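# The "." above writes the cache files alongside this inventory script;
# an absolute directory works as well, for example (illustrative value,
# not a shipped default):
# path = /var/cache/ansible-foreman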
max_age = 60 # Whether to scan foreman to add recently created hosts in inventory cache scan_new_hosts = True ansible-2.5.1/contrib/inventory/foreman.py0000755000000000000000000004067313265756155020624 0ustar rootroot00000000000000#!/usr/bin/env python # vim: set fileencoding=utf-8 : # # Copyright (C) 2016 Guido Günther , # Daniel Lobato Garcia # # This script is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with it. If not, see . # # This is somewhat based on cobbler inventory # Stdlib imports # __future__ imports must occur at the beginning of file from __future__ import print_function try: # Python 2 version import ConfigParser except ImportError: # Python 3 version import configparser as ConfigParser import json import argparse import copy import os import re import sys from time import time from collections import defaultdict from distutils.version import LooseVersion, StrictVersion # 3rd party imports import requests if LooseVersion(requests.__version__) < LooseVersion('1.1.0'): print('This script requires python-requests 1.1 as a minimum version') sys.exit(1) from requests.auth import HTTPBasicAuth def json_format_dict(data, pretty=False): """Converts a dict to a JSON object and dumps it as a formatted string""" if pretty: return json.dumps(data, sort_keys=True, indent=2) else: return json.dumps(data) class ForemanInventory(object): def __init__(self): self.inventory = defaultdict(list) # A list of groups and the hosts in that group self.cache = dict() # Details about hosts in the inventory self.params = dict() # Params of each host self.facts = dict() # Facts of each host self.hostgroups = dict() # host groups self.hostcollections = dict() # host collections self.session = None # Requests session self.config_paths = [ "/etc/ansible/foreman.ini", os.path.dirname(os.path.realpath(__file__)) + '/foreman.ini', ] env_value = os.environ.get('FOREMAN_INI_PATH') if env_value is not None: self.config_paths.append(os.path.expanduser(os.path.expandvars(env_value))) def read_settings(self): """Reads the settings from the foreman.ini file""" config = ConfigParser.SafeConfigParser() config.read(self.config_paths) # Foreman API related try: self.foreman_url = config.get('foreman', 'url') self.foreman_user = config.get('foreman', 'user') self.foreman_pw = config.get('foreman', 'password', raw=True) self.foreman_ssl_verify = config.getboolean('foreman', 'ssl_verify') except (ConfigParser.NoOptionError, ConfigParser.NoSectionError) as e: print("Error parsing configuration: %s" % e, file=sys.stderr) return False # Ansible related try: group_patterns = config.get('ansible', 'group_patterns') except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): group_patterns = "[]" self.group_patterns = json.loads(group_patterns) try: self.group_prefix = config.get('ansible', 'group_prefix') except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): self.group_prefix = "foreman_" try: self.want_facts = config.getboolean('ansible', 'want_facts') except (ConfigParser.NoOptionError, 
ConfigParser.NoSectionError): self.want_facts = True try: self.want_hostcollections = config.getboolean('ansible', 'want_hostcollections') except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): self.want_hostcollections = False # Do we want parameters to be interpreted if possible as JSON? (no by default) try: self.rich_params = config.getboolean('ansible', 'rich_params') except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): self.rich_params = False try: self.host_filters = config.get('foreman', 'host_filters') except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): self.host_filters = None # Cache related try: cache_path = os.path.expanduser(config.get('cache', 'path')) except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): cache_path = '.' (script, ext) = os.path.splitext(os.path.basename(__file__)) self.cache_path_cache = cache_path + "/%s.cache" % script self.cache_path_inventory = cache_path + "/%s.index" % script self.cache_path_params = cache_path + "/%s.params" % script self.cache_path_facts = cache_path + "/%s.facts" % script self.cache_path_hostcollections = cache_path + "/%s.hostcollections" % script try: self.cache_max_age = config.getint('cache', 'max_age') except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): self.cache_max_age = 60 try: self.scan_new_hosts = config.getboolean('cache', 'scan_new_hosts') except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): self.scan_new_hosts = False return True def parse_cli_args(self): """Command line argument processing""" parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on foreman') parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)') parser.add_argument('--host', action='store', help='Get all the variables about a specific instance') parser.add_argument('--refresh-cache', action='store_true', default=False, help='Force refresh of cache by making API requests to foreman (default: False - use cache files)') self.args = parser.parse_args() def _get_session(self): if not self.session: self.session = requests.session() self.session.auth = HTTPBasicAuth(self.foreman_user, self.foreman_pw) self.session.verify = self.foreman_ssl_verify return self.session def _get_json(self, url, ignore_errors=None, params=None): if params is None: params = {} params['per_page'] = 250 page = 1 results = [] s = self._get_session() while True: params['page'] = page ret = s.get(url, params=params) if ignore_errors and ret.status_code in ignore_errors: break ret.raise_for_status() json = ret.json() # /hosts/:id has no 'results' key if 'results' not in json: return json # Facts are returned as a dict in 'results', not a list if isinstance(json['results'], dict): return json['results'] # The list of all hosts is returned paginated results = results + json['results'] if len(results) >= json['subtotal']: break page += 1 if len(json['results']) == 0: print("Did not make any progress during loop.
" "expected %d got %d" % (json['total'], len(results)), file=sys.stderr) break return results def _get_hosts(self): url = "%s/api/v2/hosts" % self.foreman_url params = {} if self.host_filters: params['search'] = self.host_filters return self._get_json(url, params=params) def _get_host_data_by_id(self, hid): url = "%s/api/v2/hosts/%s" % (self.foreman_url, hid) return self._get_json(url) def _get_facts_by_id(self, hid): url = "%s/api/v2/hosts/%s/facts" % (self.foreman_url, hid) return self._get_json(url) def _resolve_params(self, host_params): """Convert host params to dict""" params = {} for param in host_params: name = param['name'] if self.rich_params: try: params[name] = json.loads(param['value']) except ValueError: params[name] = param['value'] else: params[name] = param['value'] return params def _get_facts_by_id(self, hid): url = "%s/api/v2/hosts/%s/facts" % (self.foreman_url, hid) return self._get_json(url) def _get_facts(self, host): """Fetch all host facts of the host""" if not self.want_facts: return {} ret = self._get_facts_by_id(host['id']) if len(ret.values()) == 0: facts = {} elif len(ret.values()) == 1: facts = list(ret.values())[0] else: raise ValueError("More than one set of facts returned for '%s'" % host) return facts def write_to_cache(self, data, filename): """Write data in JSON format to a file""" json_data = json_format_dict(data, True) cache = open(filename, 'w') cache.write(json_data) cache.close() def _write_cache(self): self.write_to_cache(self.cache, self.cache_path_cache) self.write_to_cache(self.inventory, self.cache_path_inventory) self.write_to_cache(self.params, self.cache_path_params) self.write_to_cache(self.facts, self.cache_path_facts) self.write_to_cache(self.hostcollections, self.cache_path_hostcollections) def to_safe(self, word): '''Converts 'bad' characters in a string to underscores so they can be used as Ansible groups >>> ForemanInventory.to_safe("foo-bar baz") 'foo_barbaz' ''' regex = r"[^A-Za-z0-9\_]" return re.sub(regex, "_", word.replace(" ", "")) def update_cache(self, scan_only_new_hosts=False): """Make calls to foreman and save the output in a cache""" self.groups = dict() self.hosts = dict() for host in self._get_hosts(): if host['name'] in self.cache.keys() and scan_only_new_hosts: continue dns_name = host['name'] host_data = self._get_host_data_by_id(host['id']) host_params = host_data.get('all_parameters', {}) # Create ansible groups for hostgroup group = 'hostgroup' val = host.get('%s_title' % group) or host.get('%s_name' % group) if val: safe_key = self.to_safe('%s%s_%s' % (self.group_prefix, group, val.lower())) self.inventory[safe_key].append(dns_name) # Create ansible groups for environment, location and organization for group in ['environment', 'location', 'organization']: val = host.get('%s_name' % group) if val: safe_key = self.to_safe('%s%s_%s' % (self.group_prefix, group, val.lower())) self.inventory[safe_key].append(dns_name) for group in ['lifecycle_environment', 'content_view']: val = host.get('content_facet_attributes', {}).get('%s_name' % group) if val: safe_key = self.to_safe('%s%s_%s' % (self.group_prefix, group, val.lower())) self.inventory[safe_key].append(dns_name) params = self._resolve_params(host_params) # Ansible groups by parameters in host groups and Foreman host # attributes. 
groupby = dict() for k, v in params.items(): groupby[k] = self.to_safe(str(v)) # The name of the ansible groups is given by group_patterns: for pattern in self.group_patterns: try: key = pattern.format(**groupby) self.inventory[key].append(dns_name) except KeyError: pass # Host not part of this group if self.want_hostcollections: hostcollections = host_data.get('host_collections') if hostcollections: # Create Ansible groups for host collections for hostcollection in hostcollections: safe_key = self.to_safe('%shostcollection_%s' % (self.group_prefix, hostcollection['name'].lower())) self.inventory[safe_key].append(dns_name) self.hostcollections[dns_name] = hostcollections self.cache[dns_name] = host self.params[dns_name] = params self.facts[dns_name] = self._get_facts(host) self.inventory['all'].append(dns_name) self._write_cache() def is_cache_valid(self): """Determines if the cache is still valid""" if os.path.isfile(self.cache_path_cache): mod_time = os.path.getmtime(self.cache_path_cache) current_time = time() if (mod_time + self.cache_max_age) > current_time: if (os.path.isfile(self.cache_path_inventory) and os.path.isfile(self.cache_path_params) and os.path.isfile(self.cache_path_facts)): return True return False def load_inventory_from_cache(self): """Read the index from the cache file sets self.index""" with open(self.cache_path_inventory, 'r') as fp: self.inventory = json.load(fp) def load_params_from_cache(self): """Read the index from the cache file sets self.index""" with open(self.cache_path_params, 'r') as fp: self.params = json.load(fp) def load_facts_from_cache(self): """Read the index from the cache file sets self.facts""" if not self.want_facts: return with open(self.cache_path_facts, 'r') as fp: self.facts = json.load(fp) def load_hostcollections_from_cache(self): """Read the index from the cache file sets self.hostcollections""" if not self.want_hostcollections: return with open(self.cache_path_hostcollections, 'r') as fp: self.hostcollections = json.load(fp) def load_cache_from_cache(self): """Read the cache from the cache file sets self.cache""" with open(self.cache_path_cache, 'r') as fp: self.cache = json.load(fp) def get_inventory(self): if self.args.refresh_cache or not self.is_cache_valid(): self.update_cache() else: self.load_inventory_from_cache() self.load_params_from_cache() self.load_facts_from_cache() self.load_hostcollections_from_cache() self.load_cache_from_cache() if self.scan_new_hosts: self.update_cache(True) def get_host_info(self): """Get variables about a specific host""" if not self.cache or len(self.cache) == 0: # Need to load index from cache self.load_cache_from_cache() if self.args.host not in self.cache: # try updating the cache self.update_cache() if self.args.host not in self.cache: # host might not exist anymore return json_format_dict({}, True) return json_format_dict(self.cache[self.args.host], True) def _print_data(self): data_to_print = "" if self.args.host: data_to_print += self.get_host_info() else: self.inventory['_meta'] = {'hostvars': {}} for hostname in self.cache: self.inventory['_meta']['hostvars'][hostname] = { 'foreman': self.cache[hostname], 'foreman_params': self.params[hostname], } if self.want_facts: self.inventory['_meta']['hostvars'][hostname]['foreman_facts'] = self.facts[hostname] data_to_print += json_format_dict(self.inventory, True) print(data_to_print) def run(self): # Read settings and parse CLI arguments if not self.read_settings(): return False self.parse_cli_args() self.get_inventory() self._print_data() return 
True if __name__ == '__main__': sys.exit(not ForemanInventory().run()) ansible-2.5.1/contrib/inventory/freeipa.py0000755000000000000000000000541713265756155020605 0ustar rootroot00000000000000#!/usr/bin/env python # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) import argparse import json from ipalib import api, errors from six import u def initialize(): ''' This function initializes the FreeIPA/IPA API. This function requires no arguments. A kerberos key must be present in the users keyring in order for this to work. ''' api.bootstrap(context='cli') api.finalize() try: api.Backend.rpcclient.connect() except AttributeError: # FreeIPA < 4.0 compatibility api.Backend.xmlclient.connect() return api def list_groups(api): ''' This function prints a list of all host groups. This function requires one argument, the FreeIPA/IPA API object. ''' inventory = {} hostvars = {} result = api.Command.hostgroup_find(all=True)['result'] for hostgroup in result: # Get direct and indirect members (nested hostgroups) of hostgroup members = [] if 'member_host' in hostgroup: members = [host for host in hostgroup['member_host']] if 'memberindirect_host' in hostgroup: members += (host for host in hostgroup['memberindirect_host']) inventory[hostgroup['cn'][0]] = {'hosts': [host for host in members]} for member in members: hostvars[member] = {} inventory['_meta'] = {'hostvars': hostvars} inv_string = json.dumps(inventory, indent=1, sort_keys=True) print(inv_string) return None def parse_args(): ''' This function parses the arguments that were passed in via the command line. This function expects no arguments. ''' parser = argparse.ArgumentParser(description='Ansible FreeIPA/IPA ' 'inventory module') group = parser.add_mutually_exclusive_group(required=True) group.add_argument('--list', action='store_true', help='List active servers') group.add_argument('--host', help='List details about the specified host') return parser.parse_args() def get_host_attributes(api, host): """ This function expects one string, this hostname to lookup variables for. Args: api: FreeIPA API Object host: Name of Hostname Returns: Dict of Host vars if found else None """ try: result = api.Command.host_show(u(host))['result'] if 'usercertificate' in result: del result['usercertificate'] return json.dumps(result, indent=1) except errors.NotFound as e: return {} if __name__ == '__main__': args = parse_args() api = initialize() if args.host: print(get_host_attributes(api, args.host)) elif args.list: list_groups(api) ansible-2.5.1/contrib/inventory/gce.ini0000644000000000000000000000572113265756155020052 0ustar rootroot00000000000000# Copyright 2013 Google Inc. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . # The GCE inventory script has the following dependencies: # 1. A valid Google Cloud Platform account with Google Compute Engine # enabled. See https://cloud.google.com # 2. 
An OAuth2 Service Account flow should be enabled. This will generate # a private key file that the inventory script will use for API request # authorization. See https://developers.google.com/accounts/docs/OAuth2 # 3. Convert the private key from PKCS12 to PEM format # $ openssl pkcs12 -in pkey.pkcs12 -passin pass:notasecret \ # > -nodes -nocerts | openssl rsa -out pkey.pem # 4. The libcloud (>=0.13.3) Python library. See http://libcloud.apache.org # # (See ansible/test/gce_tests.py comments for full install instructions) # # Author: Eric Johnson [gce] # GCE Service Account configuration information can be stored in the # libcloud 'secrets.py' file. Ideally, the 'secrets.py' file will already # exist in your PYTHONPATH and be picked up automatically with an import # statement in the inventory script. However, you can specify an absolute # path to the secrets.py file with the 'libcloud_secrets' parameter. # This option will be deprecated in a future release. libcloud_secrets = # If you are not going to use a 'secrets.py' file, you can set the necessary # authorization parameters here. gce_service_account_email_address = gce_service_account_pem_file_path = gce_project_id = gce_zone = # Filter inventory based on instance state. Leave undefined to return instances regardless of state. # example: Uncomment to only return inventory in the running or provisioning state #instance_states = RUNNING,PROVISIONING [inventory] # The 'inventory_ip_type' parameter specifies whether 'ansible_ssh_host' should # contain the instance internal or external address. Values may be either # 'internal' or 'external'. If 'external' is specified but no external instance # address exists, the internal address will be used. # The INVENTORY_IP_TYPE environment variable will override this value. inventory_ip_type = [cache] # directory in which cache should be created cache_path = ~/.ansible/tmp # The number of seconds a cache file is considered valid. After this many # seconds, a new API call will be made, and the cache file will be updated. # To disable the cache, set this value to 0 cache_max_age = 300 ansible-2.5.1/contrib/inventory/gce.py0000755000000000000000000004365213265756155017731 0ustar rootroot00000000000000#!/usr/bin/env python # Copyright 2013 Google Inc. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ''' GCE external inventory script ================================= Generates inventory that Ansible can understand by making API requests to Google Compute Engine via the libcloud library. Full install/configuration instructions for the gce* modules can be found in the comments of ansible/test/gce_tests.py.
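(Usage note, inferred from get_config() below: the script reads gce.ini
from its own directory unless the GCE_INI_PATH environment variable points
somewhere else, e.g. `GCE_INI_PATH=/etc/ansible/gce.ini ./gce.py --list`.)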
When run against a specific host, this script returns the following variables based on the data obtained from the libcloud Node object: - gce_uuid - gce_id - gce_image - gce_machine_type - gce_private_ip - gce_public_ip - gce_name - gce_description - gce_status - gce_zone - gce_tags - gce_metadata - gce_network - gce_subnetwork When run in --list mode, instances are grouped by the following categories: - zone: zone group name examples are us-central1-b, europe-west1-a, etc. - instance tags: An entry is created for each tag. For example, if you have two instances with a common tag called 'foo', they will both be grouped together under the 'tag_foo' name. - network name: the name of the network is appended to 'network_' (e.g. the 'default' network will result in a group named 'network_default') - machine type types follow a pattern like n1-standard-4, g1-small, etc. - running status: group name prefixed with 'status_' (e.g. status_running, status_stopped,..) - image: when using an ephemeral/scratch disk, this will be set to the image name used when creating the instance (e.g. debian-7-wheezy-v20130816). when your instance was created with a root persistent disk it will be set to 'persistent_disk' since there is no current way to determine the image. Examples: Execute uname on all instances in the us-central1-a zone $ ansible -i gce.py us-central1-a -m shell -a "/bin/uname -a" Use the GCE inventory script to print out instance specific information $ contrib/inventory/gce.py --host my_instance Author: Eric Johnson Contributors: Matt Hite , Tom Melendez Version: 0.0.3 ''' try: import pkg_resources except ImportError: # Use pkg_resources to find the correct versions of libraries and set # sys.path appropriately when there are multiversion installs. We don't # fail here as there is code that better expresses the errors where the # library is used. pass USER_AGENT_PRODUCT = "Ansible-gce_inventory_plugin" USER_AGENT_VERSION = "v2" import sys import os import argparse from time import time if sys.version_info >= (3, 0): import configparser else: import ConfigParser as configparser import logging logging.getLogger('libcloud.common.google').addHandler(logging.NullHandler()) try: import json except ImportError: import simplejson as json try: from libcloud.compute.types import Provider from libcloud.compute.providers import get_driver _ = Provider.GCE except: sys.exit("GCE inventory script requires libcloud >= 0.13") class CloudInventoryCache(object): def __init__(self, cache_name='ansible-cloud-cache', cache_path='/tmp', cache_max_age=300): cache_dir = os.path.expanduser(cache_path) if not os.path.exists(cache_dir): os.makedirs(cache_dir) self.cache_path_cache = os.path.join(cache_dir, cache_name) self.cache_max_age = cache_max_age def is_valid(self, max_age=None): ''' Determines if the cache files have expired, or if it is still valid ''' if max_age is None: max_age = self.cache_max_age if os.path.isfile(self.cache_path_cache): mod_time = os.path.getmtime(self.cache_path_cache) current_time = time() if (mod_time + max_age) > current_time: return True return False def get_all_data_from_cache(self, filename=''): ''' Reads the JSON inventory from the cache file. Returns Python dictionary. ''' data = '' if not filename: filename = self.cache_path_cache with open(filename, 'r') as cache: data = cache.read() return json.loads(data) def write_to_cache(self, data, filename=''): ''' Writes data to file as JSON. Returns True. 
''' if not filename: filename = self.cache_path_cache json_data = json.dumps(data) with open(filename, 'w') as cache: cache.write(json_data) return True class GceInventory(object): def __init__(self): # Cache object self.cache = None # dictionary containing inventory read from disk self.inventory = {} # Read settings and parse CLI arguments self.parse_cli_args() self.config = self.get_config() self.driver = self.get_gce_driver() self.ip_type = self.get_inventory_options() if self.ip_type: self.ip_type = self.ip_type.lower() # Cache management start_inventory_time = time() cache_used = False if self.args.refresh_cache or not self.cache.is_valid(): self.do_api_calls_update_cache() else: self.load_inventory_from_cache() cache_used = True self.inventory['_meta']['stats'] = { 'inventory_load_time': time() - start_inventory_time, 'cache_used': cache_used } # Just display data for specific host if self.args.host: print(self.json_format_dict( self.inventory['_meta']['hostvars'][self.args.host], pretty=self.args.pretty)) else: # Otherwise, assume user wants all instances grouped print(self.json_format_dict(self.inventory, pretty=self.args.pretty)) sys.exit(0) def get_config(self): """ Reads the settings from the gce.ini file. Populates a SafeConfigParser object with defaults and attempts to read an .ini-style configuration from the filename specified in GCE_INI_PATH. If the environment variable is not present, the filename defaults to gce.ini in the same directory as this script. """ gce_ini_default_path = os.path.join( os.path.dirname(os.path.realpath(__file__)), "gce.ini") gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path) # Create a ConfigParser. # This provides empty defaults to each key, so that environment # variable configuration (as opposed to INI configuration) is able # to work. config = configparser.SafeConfigParser(defaults={ 'gce_service_account_email_address': '', 'gce_service_account_pem_file_path': '', 'gce_project_id': '', 'gce_zone': '', 'libcloud_secrets': '', 'inventory_ip_type': '', 'cache_path': '~/.ansible/tmp', 'cache_max_age': '300' }) if 'gce' not in config.sections(): config.add_section('gce') if 'inventory' not in config.sections(): config.add_section('inventory') if 'cache' not in config.sections(): config.add_section('cache') config.read(gce_ini_path) ######### # Section added for processing ini settings ######### # Set the instance_states filter based on config file options self.instance_states = [] if config.has_option('gce', 'instance_states'): states = config.get('gce', 'instance_states') # Ignore if instance_states is an empty string. if states: self.instance_states = states.split(',') # Caching cache_path = config.get('cache', 'cache_path') cache_max_age = config.getint('cache', 'cache_max_age') # TODO(supertom): support project-specific caches cache_name = 'ansible-gce.cache' self.cache = CloudInventoryCache(cache_path=cache_path, cache_max_age=cache_max_age, cache_name=cache_name) return config def get_inventory_options(self): """Determine inventory options. Environment variables always take precedence over configuration files.""" ip_type = self.config.get('inventory', 'inventory_ip_type') # If the appropriate environment variables are set, they override # other configuration ip_type = os.environ.get('INVENTORY_IP_TYPE', ip_type) return ip_type def get_gce_driver(self): """Determine the GCE authorization settings and return a libcloud driver.
""" # Attempt to get GCE params from a configuration file, if one # exists. secrets_path = self.config.get('gce', 'libcloud_secrets') secrets_found = False try: import secrets args = list(secrets.GCE_PARAMS) kwargs = secrets.GCE_KEYWORD_PARAMS secrets_found = True except: pass if not secrets_found and secrets_path: if not secrets_path.endswith('secrets.py'): err = "Must specify libcloud secrets file as " err += "/absolute/path/to/secrets.py" sys.exit(err) sys.path.append(os.path.dirname(secrets_path)) try: import secrets args = list(getattr(secrets, 'GCE_PARAMS', [])) kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {}) secrets_found = True except: pass if not secrets_found: args = [ self.config.get('gce', 'gce_service_account_email_address'), self.config.get('gce', 'gce_service_account_pem_file_path') ] kwargs = {'project': self.config.get('gce', 'gce_project_id'), 'datacenter': self.config.get('gce', 'gce_zone')} # If the appropriate environment variables are set, they override # other configuration; process those into our args and kwargs. args[0] = os.environ.get('GCE_EMAIL', args[0]) args[1] = os.environ.get('GCE_PEM_FILE_PATH', args[1]) args[1] = os.environ.get('GCE_CREDENTIALS_FILE_PATH', args[1]) kwargs['project'] = os.environ.get('GCE_PROJECT', kwargs['project']) kwargs['datacenter'] = os.environ.get('GCE_ZONE', kwargs['datacenter']) # Retrieve and return the GCE driver. gce = get_driver(Provider.GCE)(*args, **kwargs) gce.connection.user_agent_append( '%s/%s' % (USER_AGENT_PRODUCT, USER_AGENT_VERSION), ) return gce def parse_env_zones(self): '''returns a list of comma separated zones parsed from the GCE_ZONE environment variable. If provided, this will be used to filter the results of the grouped_instances call''' import csv reader = csv.reader([os.environ.get('GCE_ZONE', "")], skipinitialspace=True) zones = [r for r in reader] return [z for z in zones[0]] def parse_cli_args(self): ''' Command line argument processing ''' parser = argparse.ArgumentParser( description='Produce an Ansible Inventory file based on GCE') parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)') parser.add_argument('--host', action='store', help='Get all information about an instance') parser.add_argument('--pretty', action='store_true', default=False, help='Pretty format (default: False)') parser.add_argument( '--refresh-cache', action='store_true', default=False, help='Force refresh of cache by making API requests (default: False - use cache files)') self.args = parser.parse_args() def node_to_dict(self, inst): md = {} if inst is None: return {} if 'items' in inst.extra['metadata']: for entry in inst.extra['metadata']['items']: md[entry['key']] = entry['value'] net = inst.extra['networkInterfaces'][0]['network'].split('/')[-1] subnet = None if 'subnetwork' in inst.extra['networkInterfaces'][0]: subnet = inst.extra['networkInterfaces'][0]['subnetwork'].split('/')[-1] # default to exernal IP unless user has specified they prefer internal if self.ip_type == 'internal': ssh_host = inst.private_ips[0] else: ssh_host = inst.public_ips[0] if len(inst.public_ips) >= 1 else inst.private_ips[0] return { 'gce_uuid': inst.uuid, 'gce_id': inst.id, 'gce_image': inst.image, 'gce_machine_type': inst.size, 'gce_private_ip': inst.private_ips[0], 'gce_public_ip': inst.public_ips[0] if len(inst.public_ips) >= 1 else None, 'gce_name': inst.name, 'gce_description': inst.extra['description'], 'gce_status': inst.extra['status'], 'gce_zone': inst.extra['zone'].name, 'gce_tags': 
inst.extra['tags'], 'gce_metadata': md, 'gce_network': net, 'gce_subnetwork': subnet, # Hosts don't have a public name, so we add an IP 'ansible_ssh_host': ssh_host } def load_inventory_from_cache(self): ''' Loads inventory from JSON on disk. ''' try: self.inventory = self.cache.get_all_data_from_cache() hosts = self.inventory['_meta']['hostvars'] except Exception: print( "Invalid inventory file %s. Please rebuild with --refresh-cache option." % (self.cache.cache_path_cache)) raise def do_api_calls_update_cache(self): ''' Do API calls and save data in cache. ''' zones = self.parse_env_zones() data = self.group_instances(zones) self.cache.write_to_cache(data) self.inventory = data def list_nodes(self): all_nodes = [] params, more_results = {'maxResults': 500}, True while more_results: self.driver.connection.gce_params = params all_nodes.extend(self.driver.list_nodes()) more_results = 'pageToken' in params return all_nodes def group_instances(self, zones=None): '''Group all instances''' groups = {} meta = {} meta["hostvars"] = {} for node in self.list_nodes(): # This check filters on the desired instance states defined in the # config file with the instance_states config option. # # If the instance_states list is _empty_ then _ALL_ states are returned. # # If the instance_states list is _populated_ then check the current # state against the instance_states list if self.instance_states and node.extra['status'] not in self.instance_states: continue name = node.name meta["hostvars"][name] = self.node_to_dict(node) zone = node.extra['zone'].name # To avoid making multiple requests per zone # we list all nodes and then filter the results if zones and zone not in zones: continue if zone in groups: groups[zone].append(name) else: groups[zone] = [name] tags = node.extra['tags'] for t in tags: if t.startswith('group-'): tag = t[6:] else: tag = 'tag_%s' % t if tag in groups: groups[tag].append(name) else: groups[tag] = [name] net = node.extra['networkInterfaces'][0]['network'].split('/')[-1] net = 'network_%s' % net if net in groups: groups[net].append(name) else: groups[net] = [name] machine_type = node.size if machine_type in groups: groups[machine_type].append(name) else: groups[machine_type] = [name] image = node.image or 'persistent_disk' if image in groups: groups[image].append(name) else: groups[image] = [name] status = node.extra['status'] stat = 'status_%s' % status.lower() if stat in groups: groups[stat].append(name) else: groups[stat] = [name] for private_ip in node.private_ips: groups[private_ip] = [name] if len(node.public_ips) >= 1: for public_ip in node.public_ips: groups[public_ip] = [name] groups["_meta"] = meta return groups def json_format_dict(self, data, pretty=False): ''' Converts a dict to a JSON object and dumps it as a formatted string ''' if pretty: return json.dumps(data, sort_keys=True, indent=2) else: return json.dumps(data) # Run the script if __name__ == '__main__': GceInventory() ansible-2.5.1/contrib/inventory/infoblox.py0000755000000000000000000000702013265756155021002 0ustar rootroot00000000000000#!/usr/bin/env python # # (c) 2018, Red Hat, Inc. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version.
# # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # import os import sys import json import argparse from ansible.parsing.dataloader import DataLoader from ansible.module_utils.six import iteritems from ansible.module_utils._text import to_text from ansible.module_utils.net_tools.nios.api import WapiInventory from ansible.module_utils.net_tools.nios.api import normalize_extattrs, flatten_extattrs CONFIG_FILES = [ '/etc/ansible/infoblox.yaml', '/etc/ansible/infoblox.yml' ] def parse_args(): parser = argparse.ArgumentParser() parser.add_argument('--list', action='store_true', help='List host records from NIOS for use in Ansible') parser.add_argument('--host', help='List meta data about single host (not used)') return parser.parse_args() def main(): args = parse_args() for config_file in CONFIG_FILES: if os.path.exists(config_file): break else: sys.stdout.write('unable to locate config file at /etc/ansible/infoblox.yaml or /etc/ansible/infoblox.yml\n') sys.exit(-1) try: loader = DataLoader() config = loader.load_from_file(config_file) provider = config.get('provider') or {} wapi = WapiInventory(provider) except Exception as exc: sys.stdout.write(to_text(exc)) sys.exit(-1) if args.host: host_filter = {'name': args.host} else: host_filter = {} # tolerate a config file without a filters section config_filters = config.get('filters') or {} if config_filters.get('view') is not None: host_filter['view'] = config_filters['view'] if config_filters.get('extattrs'): extattrs = normalize_extattrs(config_filters['extattrs']) else: extattrs = {} hostvars = {} inventory = { '_meta': { 'hostvars': hostvars } } return_fields = ['name', 'view', 'extattrs', 'ipv4addrs'] hosts = wapi.get_object('record:host', host_filter, extattrs=extattrs, return_fields=return_fields) if hosts: for item in hosts: view = item['view'] name = item['name'] if view not in inventory: inventory[view] = {'hosts': []} inventory[view]['hosts'].append(name) hostvars[name] = { 'view': view } if item.get('extattrs'): for key, value in iteritems(flatten_extattrs(item['extattrs'])): if key.startswith('ansible_'): hostvars[name][key] = value else: if 'extattrs' not in hostvars[name]: hostvars[name]['extattrs'] = {} hostvars[name]['extattrs'][key] = value sys.stdout.write(json.dumps(inventory, indent=4)) sys.exit(0) if __name__ == '__main__': main() ansible-2.5.1/contrib/inventory/infoblox.yaml0000644000000000000000000000166113265756155021316 0ustar rootroot00000000000000--- # This file provides the configuration information for the Infoblox dynamic # inventory script that is used to dynamically pull host information from NIOS. # This file should be copied to /etc/ansible/infoblox.yaml in order for the # dynamic script to find it. # Sets the provider arguments for authenticating to the Infoblox server to # retrieve inventory hosts. Provider arguments can also be set using # environment variables. Supported environment variables all start with # INFOBLOX_{{ name }}. For instance, to set the host provider value, the # environment variable would be INFOBLOX_HOST. provider: host: username: password: # Filters allow the dynamic inventory script to restrict the set of hosts that # are returned from the Infoblox server.
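# For example (illustrative values, not defaults), to return only hosts in
# the "default" DNS view that carry an extensible attribute Site=DC1:
#
# filters:
#   view: default
#   extattrs:
#     Site: DC1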
filters: # restrict returned hosts by extensible attributes extattrs: {} # restrict returned hosts to a specified DNS view view: null ansible-2.5.1/contrib/inventory/jail.py0000755000000000000000000000243113265756155020102 0ustar rootroot00000000000000#!/usr/bin/env python # (c) 2013, Michael Scherer # # This file is part of Ansible, # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from subprocess import Popen, PIPE import sys import json result = {} result['all'] = {} pipe = Popen(['jls', '-q', 'name'], stdout=PIPE, universal_newlines=True) result['all']['hosts'] = [x[:-1] for x in pipe.stdout.readlines()] result['all']['vars'] = {} result['all']['vars']['ansible_connection'] = 'jail' if len(sys.argv) == 2 and sys.argv[1] == '--list': print(json.dumps(result)) elif len(sys.argv) == 3 and sys.argv[1] == '--host': print(json.dumps({'ansible_connection': 'jail'})) else: sys.stderr.write("Need an argument, either --list or --host <host>\n") ansible-2.5.1/contrib/inventory/landscape.py0000755000000000000000000000661313265756155021123 0ustar rootroot00000000000000#!/usr/bin/env python # (c) 2015, Marc Abramowitz # # This file is part of Ansible. # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Dynamic inventory script which lets you use nodes discovered by Canonical's # Landscape (http://www.ubuntu.com/management/landscape-features).
# # Requires the `landscape_api` Python module # See: # - https://landscape.canonical.com/static/doc/api/api-client-package.html # - https://landscape.canonical.com/static/doc/api/python-api.html # # Environment variables # --------------------- # - `LANDSCAPE_API_URI` # - `LANDSCAPE_API_KEY` # - `LANDSCAPE_API_SECRET` # - `LANDSCAPE_API_SSL_CA_FILE` (optional) import argparse import collections import os import sys from landscape_api.base import API, HTTPError try: import json except ImportError: import simplejson as json _key = 'landscape' class EnvironmentConfig(object): uri = os.getenv('LANDSCAPE_API_URI') access_key = os.getenv('LANDSCAPE_API_KEY') secret_key = os.getenv('LANDSCAPE_API_SECRET') ssl_ca_file = os.getenv('LANDSCAPE_API_SSL_CA_FILE') def _landscape_client(): env = EnvironmentConfig() return API( uri=env.uri, access_key=env.access_key, secret_key=env.secret_key, ssl_ca_file=env.ssl_ca_file) def get_landscape_members_data(): return _landscape_client().get_computers() def get_nodes(data): return [node['hostname'] for node in data] def get_groups(data): groups = collections.defaultdict(list) for node in data: for value in node['tags']: groups[value].append(node['hostname']) return groups def get_meta(data): meta = {'hostvars': {}} for node in data: meta['hostvars'][node['hostname']] = {'tags': node['tags']} return meta def print_list(): data = get_landscape_members_data() nodes = get_nodes(data) groups = get_groups(data) meta = get_meta(data) inventory_data = {_key: nodes, '_meta': meta} inventory_data.update(groups) print(json.dumps(inventory_data)) def print_host(host): data = get_landscape_members_data() meta = get_meta(data) print(json.dumps(meta['hostvars'][host])) def get_args(args_list): parser = argparse.ArgumentParser( description='ansible inventory script reading from landscape cluster') mutex_group = parser.add_mutually_exclusive_group(required=True) help_list = 'list all hosts from landscape cluster' mutex_group.add_argument('--list', action='store_true', help=help_list) help_host = 'display variables for a host' mutex_group.add_argument('--host', help=help_host) return parser.parse_args(args_list) def main(args_list): args = get_args(args_list) if args.list: print_list() if args.host: print_host(args.host) if __name__ == '__main__': main(sys.argv[1:]) ansible-2.5.1/contrib/inventory/libcloud.ini0000644000000000000000000000032313265756155021102 0ustar rootroot00000000000000# Ansible Apache Libcloud Generic inventory script [driver] provider = CLOUDSTACK host = path = secure = True verify_ssl_cert = True key = secret = [cache] cache_path=/path/to/your/cache cache_max_age=60 ansible-2.5.1/contrib/inventory/libvirt_lxc.py0000755000000000000000000000251513265756155021507 0ustar rootroot00000000000000#!/usr/bin/env python # (c) 2013, Michael Scherer # # This file is part of Ansible, # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
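# A minimal sketch (not part of the original script; the container name is
# illustrative): the code below builds Ansible's dynamic-inventory JSON,
# grouping every libvirt LXC domain under 'all' and forcing the
# libvirt_lxc connection plugin. The helper is never called here; it only
# documents the payload shape:
def _example_list_payload():
    """Return the kind of JSON document the --list branch prints (sketch)."""
    import json
    return json.dumps({'all': {'hosts': ['container1'],
                               'vars': {'ansible_connection': 'libvirt_lxc'}}})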
from subprocess import Popen, PIPE import sys import json result = {} result['all'] = {} pipe = Popen(['virsh', '-q', '-c', 'lxc:///', 'list', '--name', '--all'], stdout=PIPE, universal_newlines=True) result['all']['hosts'] = [x[:-1] for x in pipe.stdout.readlines()] result['all']['vars'] = {} result['all']['vars']['ansible_connection'] = 'libvirt_lxc' if len(sys.argv) == 2 and sys.argv[1] == '--list': print(json.dumps(result)) elif len(sys.argv) == 3 and sys.argv[1] == '--host': print(json.dumps({'ansible_connection': 'libvirt_lxc'})) else: sys.stderr.write("Need an argument, either --list or --host <host>\n") ansible-2.5.1/contrib/inventory/linode.ini0000644000000000000000000000113513265756155020561 0ustar rootroot00000000000000# Ansible Linode external inventory script settings # [linode] # API calls to Linode are slow. For this reason, we cache the results of an API # call. Set this to the path you want cache files to be written to. Two files # will be written to this directory: # - ansible-Linode.cache # - ansible-Linode.index cache_path = /tmp # The number of seconds a cache file is considered valid. After this many # seconds, a new API call will be made, and the cache file will be updated. cache_max_age = 300 # If set to true, use the host's public IP in the dictionary instead of the label use_public_ip = falseansible-2.5.1/contrib/inventory/linode.py0000755000000000000000000002773513265756155020447 0ustar rootroot00000000000000#!/usr/bin/env python ''' Linode external inventory script ================================= Generates inventory that Ansible can understand by making API requests to Linode using the Chube library. NOTE: This script assumes Ansible is being executed where Chube is already installed and has a valid config at ~/.chube. If not, run: pip install chube echo -e "---\napi_key: " > ~/.chube For more details, see: https://github.com/exosite/chube NOTE: By default, this script also assumes that the Linodes in your account all have labels that correspond to hostnames that are in your resolver search path. Your resolver search path is configured in /etc/resolv.conf. Optionally, if you would like to use the host's public IP instead of its label, use the following setting in linode.ini: use_public_ip = true When run against a specific host, this script returns the following variables: - api_id - datacenter_id - datacenter_city (lowercase city name of data center, e.g. 'tokyo') - label - display_group - create_dt - total_hd - total_xfer - total_ram - status - public_ip (The first public IP found) - private_ip (The first private IP found, or empty string if none) - alert_cpu_enabled - alert_cpu_threshold - alert_diskio_enabled - alert_diskio_threshold - alert_bwin_enabled - alert_bwin_threshold - alert_bwout_enabled - alert_bwout_threshold - alert_bwquota_enabled - alert_bwquota_threshold - backup_weekly_daily - backup_window - watchdog Peter Sankauskas did most of the legwork here with his EC2 plugin; I just adapted that for Linode. ''' # (c) 2013, Dan Slimmon # # This file is part of Ansible, # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details.
# # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . ###################################################################### # Standard imports import os import re import sys import argparse from time import time try: import json except ImportError: import simplejson as json try: from chube import load_chube_config from chube import api as chube_api from chube.datacenter import Datacenter from chube.linode_obj import Linode except: try: # remove local paths and other stuff that may # cause an import conflict, as chube is sensitive # to name collisions on importing old_path = sys.path sys.path = [d for d in sys.path if d not in ('', os.getcwd(), os.path.dirname(os.path.realpath(__file__)))] from chube import load_chube_config from chube import api as chube_api from chube.datacenter import Datacenter from chube.linode_obj import Linode sys.path = old_path except Exception as e: raise Exception("could not import chube") load_chube_config() # Imports for ansible import ConfigParser class LinodeInventory(object): def _empty_inventory(self): return {"_meta": {"hostvars": {}}} def __init__(self): """Main execution path.""" # Inventory grouped by display group self.inventory = self._empty_inventory() # Index of label to Linode ID self.index = {} # Local cache of Datacenter objects populated by populate_datacenter_cache() self._datacenter_cache = None # Read settings and parse CLI arguments self.read_settings() self.parse_cli_args() # Cache if self.args.refresh_cache: self.do_api_calls_update_cache() elif not self.is_cache_valid(): self.do_api_calls_update_cache() # Data to print if self.args.host: data_to_print = self.get_host_info() elif self.args.list: # Display list of nodes for inventory if len(self.inventory) == 1: data_to_print = self.get_inventory_from_cache() else: data_to_print = self.json_format_dict(self.inventory, True) print(data_to_print) def is_cache_valid(self): """Determines if the cache file has expired, or if it is still valid.""" if os.path.isfile(self.cache_path_cache): mod_time = os.path.getmtime(self.cache_path_cache) current_time = time() if (mod_time + self.cache_max_age) > current_time: if os.path.isfile(self.cache_path_index): return True return False def read_settings(self): """Reads the settings from the .ini file.""" config = ConfigParser.SafeConfigParser() config.read(os.path.dirname(os.path.realpath(__file__)) + '/linode.ini') # Cache related cache_path = config.get('linode', 'cache_path') self.cache_path_cache = cache_path + "/ansible-linode.cache" self.cache_path_index = cache_path + "/ansible-linode.index" self.cache_max_age = config.getint('linode', 'cache_max_age') self.use_public_ip = config.getboolean('linode', 'use_public_ip') def parse_cli_args(self): """Command line argument processing""" parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Linode') parser.add_argument('--list', action='store_true', default=True, help='List nodes (default: True)') parser.add_argument('--host', action='store', help='Get all the variables about a specific node') parser.add_argument('--refresh-cache', action='store_true', default=False, help='Force refresh of cache by making API requests to Linode (default: False - use cache files)') self.args = parser.parse_args() def do_api_calls_update_cache(self): """Do API calls, and save data in cache files.""" self.get_nodes() self.write_to_cache(self.inventory, self.cache_path_cache) self.write_to_cache(self.index, self.cache_path_index) def 
get_nodes(self): """Makes a Linode API call to get the list of nodes.""" try: for node in Linode.search(status=Linode.STATUS_RUNNING): self.add_node(node) except chube_api.linode_api.ApiError as e: sys.exit("Looks like Linode's API is down:\n %s" % e) def get_node(self, linode_id): """Gets details about a specific node.""" try: return Linode.find(api_id=linode_id) except chube_api.linode_api.ApiError as e: sys.exit("Looks like Linode's API is down:\n%s" % e) def populate_datacenter_cache(self): """Creates self._datacenter_cache, containing all Datacenters indexed by ID.""" self._datacenter_cache = {} dcs = Datacenter.search() for dc in dcs: self._datacenter_cache[dc.api_id] = dc def get_datacenter_city(self, node): """Returns the lowercase city name of the node's data center.""" if self._datacenter_cache is None: self.populate_datacenter_cache() location = self._datacenter_cache[node.datacenter_id].location location = location.lower() location = location.split(",")[0] return location def add_node(self, node): """Adds a node to the inventory and index.""" if self.use_public_ip: dest = self.get_node_public_ip(node) else: dest = node.label # Add to index self.index[dest] = node.api_id # Inventory: Group by node ID (always a group of 1) self.inventory[node.api_id] = [dest] # Inventory: Group by datacenter city self.push(self.inventory, self.get_datacenter_city(node), dest) # Inventory: Group by display group self.push(self.inventory, node.display_group, dest) # Inventory: Add a "linode" global tag group self.push(self.inventory, "linode", dest) # Add host info to hostvars self.inventory["_meta"]["hostvars"][dest] = self._get_host_info(node) def get_node_public_ip(self, node): """Returns the public IP address of the node""" return [addr.address for addr in node.ipaddresses if addr.is_public][0] def get_host_info(self): """Get variables about a specific host.""" if len(self.index) == 0: # Need to load index from cache self.load_index_from_cache() if self.args.host not in self.index: # try updating the cache self.do_api_calls_update_cache() if self.args.host not in self.index: # host might not exist anymore return self.json_format_dict({}, True) node_id = self.index[self.args.host] node = self.get_node(node_id) return self.json_format_dict(self._get_host_info(node), True) def _get_host_info(self, node): node_vars = {} for direct_attr in [ "api_id", "datacenter_id", "label", "display_group", "create_dt", "total_hd", "total_xfer", "total_ram", "status", "alert_cpu_enabled", "alert_cpu_threshold", "alert_diskio_enabled", "alert_diskio_threshold", "alert_bwin_enabled", "alert_bwin_threshold", "alert_bwout_enabled", "alert_bwout_threshold", "alert_bwquota_enabled", "alert_bwquota_threshold", "backup_weekly_daily", "backup_window", "watchdog" ]: node_vars[direct_attr] = getattr(node, direct_attr) node_vars["datacenter_city"] = self.get_datacenter_city(node) node_vars["public_ip"] = self.get_node_public_ip(node) # Set the SSH host information, so these inventory items can be used if # their labels aren't FQDNs node_vars['ansible_ssh_host'] = node_vars["public_ip"] node_vars['ansible_host'] = node_vars["public_ip"] private_ips = [addr.address for addr in node.ipaddresses if not addr.is_public] if private_ips: node_vars["private_ip"] = private_ips[0] return node_vars def push(self, my_dict, key, element): """Pushes an element onto an array that may not have been defined in the dict.""" if key in my_dict: my_dict[key].append(element) else: my_dict[key] = [element] def get_inventory_from_cache(self): 
"""Reads the inventory from the cache file and returns it as a JSON object.""" cache = open(self.cache_path_cache, 'r') json_inventory = cache.read() return json_inventory def load_index_from_cache(self): """Reads the index from the cache file and sets self.index.""" cache = open(self.cache_path_index, 'r') json_index = cache.read() self.index = json.loads(json_index) def write_to_cache(self, data, filename): """Writes data in JSON format to a file.""" json_data = self.json_format_dict(data, True) cache = open(filename, 'w') cache.write(json_data) cache.close() def to_safe(self, word): """Escapes any characters that would be invalid in an ansible group name.""" return re.sub(r"[^A-Za-z0-9\-]", "_", word) def json_format_dict(self, data, pretty=False): """Converts a dict to a JSON object and dumps it as a formatted string.""" if pretty: return json.dumps(data, sort_keys=True, indent=2) else: return json.dumps(data) LinodeInventory() ansible-2.5.1/contrib/inventory/lxc_inventory.py0000755000000000000000000000477713265756155022105 0ustar rootroot00000000000000#!/usr/bin/env python # # (c) 2015-16 Florian Haas, hastexo Professional Services GmbH # # Based in part on: # libvirt_lxc.py, (c) 2013, Michael Scherer # # This file is part of Ansible, # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . """ Ansible inventory script for LXC containers. Requires Python bindings for LXC API. In LXC, containers can be grouped by setting the lxc.group option, which may be found more than once in a container's configuration. So, we enumerate all containers, fetch their list of groups, and then build the dictionary in the way Ansible expects it. """ from __future__ import print_function import sys import lxc import json def build_dict(): """Returns a dictionary keyed to the defined LXC groups. All containers, including the ones not in any group, are included in the "all" group.""" # Enumerate all containers, and list the groups they are in. Also, # implicitly add every container to the 'all' group. 
containers = dict([(c, ['all'] + (lxc.Container(c).get_config_item('lxc.group') or [])) for c in lxc.list_containers()]) # Extract the groups, flatten the list, and remove duplicates groups = set(sum([g for g in containers.values()], [])) # Create a dictionary for each group (including the 'all' group) return dict([(g, {'hosts': [k for k, v in containers.items() if g in v], 'vars': {'ansible_connection': 'lxc'}}) for g in groups]) def main(argv): """Returns a JSON dictionary as expected by Ansible""" result = build_dict() if len(argv) == 2 and argv[1] == '--list': json.dump(result, sys.stdout) elif len(argv) == 3 and argv[1] == '--host': json.dump({'ansible_connection': 'lxc'}, sys.stdout) else: print("Need an argument, either --list or --host ", file=sys.stderr) if __name__ == '__main__': main(sys.argv) ansible-2.5.1/contrib/inventory/lxd.ini0000644000000000000000000000040113265756155020071 0ustar rootroot00000000000000# LXD external inventory script settings [lxd] # The default resource #resource = local: # The group name to add the hosts to #group = lxd # The connection type to return for these hosts - lxd hasn't been tested yet #connection = lxd connection = smart ansible-2.5.1/contrib/inventory/lxd.py0000644000000000000000000000723613265756155017755 0ustar rootroot00000000000000#!/usr/bin/env python # (c) 2013, Michael Scherer # (c) 2014, Hiroaki Nakamura # (c) 2016, Andew Clarke # # This file is based on https://github.com/ansible/ansible/blob/devel/plugins/inventory/libvirt_lxc.py which is part of Ansible, # and https://github.com/hnakamur/lxc-ansible-playbooks/blob/master/provisioning/inventory-lxc.py # # NOTE: this file has some obvious limitations, improvements welcome # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
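#
# The build_dict() idiom in lxc_inventory.py above -- flatten every
# container's group list with sum(), deduplicate with set(), then invert the
# mapping into one inventory entry per group -- works on any
# {host: [groups]} mapping. The same steps in isolation, against hypothetical
# container names (a sketch, not part of any shipped script):

containers = {'web1': ['all', 'web'], 'db1': ['all', 'db'], 'spare': ['all']}
# Flatten all of the per-container group lists and deduplicate them
groups = set(sum(containers.values(), []))
# Invert the mapping: each group lists the hosts that carry it
inventory = dict((g, {'hosts': sorted(h for h, gs in containers.items() if g in gs),
                      'vars': {'ansible_connection': 'lxc'}})
                 for g in groups)
assert inventory['web']['hosts'] == ['web1']
assert inventory['all']['hosts'] == ['db1', 'spare', 'web1']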
import os from subprocess import Popen, PIPE import distutils.spawn import sys import json try: import configparser except: from six.moves import configparser # Set up defaults resource = 'local:' group = 'lxd' connection = 'lxd' hosts = {} result = {} # Read the settings from the lxd.ini file config = configparser.SafeConfigParser() config.read(os.path.dirname(os.path.realpath(__file__)) + '/lxd.ini') if config.has_option('lxd', 'resource'): resource = config.get('lxd', 'resource') if config.has_option('lxd', 'group'): group = config.get('lxd', 'group') if config.has_option('lxd', 'connection'): connection = config.get('lxd', 'connection') # Ensure executable exists if distutils.spawn.find_executable('lxc'): # Set up containers result and hosts array result[group] = {} result[group]['hosts'] = [] # Run the command and load json result pipe = Popen(['lxc', 'list', resource, '--format', 'json'], stdout=PIPE, universal_newlines=True) lxdjson = json.load(pipe.stdout) # Iterate the json lxd output for item in lxdjson: # Check state and network if 'state' in item and item['state'] is not None and 'network' in item['state']: network = item['state']['network'] # Check for eth0 and addresses if 'eth0' in network and 'addresses' in network['eth0']: addresses = network['eth0']['addresses'] # Iterate addresses for address in addresses: # Only return inet family addresses if 'family' in address and address['family'] == 'inet': if 'address' in address: ip = address['address'] name = item['name'] # Add the host to the results and the host array result[group]['hosts'].append(name) hosts[name] = ip # Set the other containers result values result[group]['vars'] = {} result[group]['vars']['ansible_connection'] = connection # Process arguments if len(sys.argv) == 2 and sys.argv[1] == '--list': print(json.dumps(result)) elif len(sys.argv) == 3 and sys.argv[1] == '--host': if sys.argv[2] == 'localhost': print(json.dumps({'ansible_connection': 'local'})) else: if connection == 'lxd': print(json.dumps({'ansible_connection': connection})) else: print(json.dumps({'ansible_connection': connection, 'ansible_host': hosts[sys.argv[2]]})) else: print("Need an argument, either --list or --host ") ansible-2.5.1/contrib/inventory/mdt.ini0000644000000000000000000000042013265756155020067 0ustar rootroot00000000000000[mdt] # Set the MDT server to connect to server = localhost.example.com # Set the MDT Instance instance = EXAMPLEINSTANCE # Set the MDT database database = MDTDB # Configure login credentials user = local.domain\admin password = adminpassword [tower] groupname = mdt ansible-2.5.1/contrib/inventory/mdt_dynamic_inventory.py0000755000000000000000000001067213265756155023576 0ustar rootroot00000000000000#!/usr/bin/env python # (c) 2016, Julian Barnett # # This file is part of Ansible. # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
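#
# The address walk in lxd.py above reaches four levels into the
# `lxc list --format json` output (state -> network -> eth0 -> addresses) and
# keeps only 'inet' (IPv4) entries. The same extraction against a
# hypothetical, pared-down record (illustrative data only):

sample = {'name': 'c1',
          'state': {'network': {'eth0': {'addresses': [
              {'family': 'inet6', 'address': 'fd42::1'},
              {'family': 'inet', 'address': '10.0.3.5'}]}}}}
ipv4 = [a['address']
        for a in sample['state']['network']['eth0']['addresses']
        if a.get('family') == 'inet']
# lxd.py records the last inet address it sees for each container name
assert ipv4 == ['10.0.3.5']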
''' MDT external inventory script ================================= author: J Barnett 06/23/2016 01:15 maintainer: J Barnett (github @jbarnett1981) ''' import argparse import json import pymssql try: import configparser except ImportError: import ConfigParser as configparser class MDTInventory(object): def __init__(self): ''' Main execution path ''' self.conn = None # Initialize empty inventory self.inventory = self._empty_inventory() # Read CLI arguments self.read_settings() self.parse_cli_args() # Get Hosts if self.args.list: self.get_hosts() # Get specific host vars if self.args.host: self.get_hosts(self.args.host) def _connect(self, query): ''' Connect to MDT and dump contents of dbo.ComputerIdentity database ''' if not self.conn: self.conn = pymssql.connect(server=self.mdt_server + "\\" + self.mdt_instance, user=self.mdt_user, password=self.mdt_password, database=self.mdt_database) cursor = self.conn.cursor() cursor.execute(query) self.mdt_dump = cursor.fetchall() self.conn.close() def get_hosts(self, hostname=False): ''' Gets host from MDT Database ''' if hostname: query = ("SELECT t1.ID, t1.Description, t1.MacAddress, t2.Role " "FROM ComputerIdentity as t1 join Settings_Roles as t2 on t1.ID = t2.ID where t1.Description = '%s'" % hostname) else: query = 'SELECT t1.ID, t1.Description, t1.MacAddress, t2.Role FROM ComputerIdentity as t1 join Settings_Roles as t2 on t1.ID = t2.ID' self._connect(query) # Configure to group name configured in Ansible Tower for this inventory groupname = self.mdt_groupname # Initialize empty host list hostlist = [] # Parse through db dump and populate inventory for hosts in self.mdt_dump: self.inventory['_meta']['hostvars'][hosts[1]] = {'id': hosts[0], 'name': hosts[1], 'mac': hosts[2], 'role': hosts[3]} hostlist.append(hosts[1]) self.inventory[groupname] = hostlist # Print it all out print(json.dumps(self.inventory, indent=2)) def _empty_inventory(self): ''' Create empty inventory dictionary ''' return {"_meta": {"hostvars": {}}} def read_settings(self): ''' Reads the settings from the mdt.ini file ''' config = configparser.SafeConfigParser() config.read('mdt.ini') # MDT Server and instance and database self.mdt_server = config.get('mdt', 'server') self.mdt_instance = config.get('mdt', 'instance') self.mdt_database = config.get('mdt', 'database') # MDT Login credentials if config.has_option('mdt', 'user'): self.mdt_user = config.get('mdt', 'user') if config.has_option('mdt', 'password'): self.mdt_password = config.get('mdt', 'password') # Group name in Tower if config.has_option('tower', 'groupname'): self.mdt_groupname = config.get('tower', 'groupname') def parse_cli_args(self): ''' Command line argument processing ''' parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on MDT') parser.add_argument('--list', action='store_true', default=False, help='List instances') parser.add_argument('--host', action='store', help='Get all the variables about a specific instance') self.args = parser.parse_args() if __name__ == "__main__": # Run the script MDTInventory() ansible-2.5.1/contrib/inventory/nagios_livestatus.ini0000644000000000000000000000214013265756155023047 0ustar rootroot00000000000000# Ansible Nagios external inventory script settings # # To get all available possibilities, check following URL: # http://www.naemon.org/documentation/usersguide/livestatus.html # https://mathias-kettner.de/checkmk_livestatus.html # [local] # Livestatus URI # Example for default naemon livestatus unix socket : # 
livestatus_uri=unix:/var/cache/naemon/live [remote] # default field name for host: name # Uncomment to override: # host_field=address # # default field group for host: groups # Uncomment to override: # group_field=state # default fields retrieved: address, alias, display_name, childs, parents # To override, uncomment the following line # fields_to_retrieve=address,alias,display_name # # default variable prefix: livestatus_ # To override, uncomment the following line # var_prefix=naemon_ # # default filter: None # # Uncomment to override # # All hosts with state = OK # host_filter=state = 0 # Warning: for the moment, you can use only one filter at a time. You cannot combine various conditions. # # All hosts in group Linux # host_filter=groups >= Linux # livestatus_uri=tcp:192.168.66.137:6557 ansible-2.5.1/contrib/inventory/nagios_livestatus.py0000755000000000000000000001512113265756155022726 0ustar rootroot00000000000000#!/usr/bin/env python # (c) 2015, Yannig Perre # # This file is part of Ansible, # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . ''' Nagios livestatus inventory script. Before using this script, please update nagios_livestatus.ini file. Livestatus is a nagios/naemon/shinken module which lets you retrieve information stored in the monitoring core. This inventory plugin needs the livestatus API for Python. Please install it before using this script (apt/pip/yum/...). Checkmk livestatus: https://mathias-kettner.de/checkmk_livestatus.html Livestatus API: http://www.naemon.org/documentation/usersguide/livestatus.html ''' import os import re import argparse import sys try: import configparser except ImportError: import ConfigParser configparser = ConfigParser import json try: from mk_livestatus import Socket except ImportError: sys.exit("Error: mk_livestatus is needed. 
Try something like: pip install python-mk-livestatus") class NagiosLivestatusInventory(object): def parse_ini_file(self): config = configparser.SafeConfigParser() config.read(os.path.dirname(os.path.realpath(__file__)) + '/nagios_livestatus.ini') for section in config.sections(): if not config.has_option(section, 'livestatus_uri'): continue # If fields_to_retrieve is not set, using default fields fields_to_retrieve = self.default_fields_to_retrieve if config.has_option(section, 'fields_to_retrieve'): fields_to_retrieve = [field.strip() for field in config.get(section, 'fields_to_retrieve').split(',')] fields_to_retrieve = tuple(fields_to_retrieve) # default section values section_values = { 'var_prefix': 'livestatus_', 'host_filter': None, 'host_field': 'name', 'group_field': 'groups' } for key, value in section_values.items(): if config.has_option(section, key): section_values[key] = config.get(section, key).strip() # Retrieving livestatus string connection livestatus_uri = config.get(section, 'livestatus_uri') backend_definition = None # Local unix socket unix_match = re.match('unix:(.*)', livestatus_uri) if unix_match is not None: backend_definition = {'connection': unix_match.group(1)} # Remote tcp connection tcp_match = re.match('tcp:(.*):([^:]*)', livestatus_uri) if tcp_match is not None: backend_definition = {'connection': (tcp_match.group(1), int(tcp_match.group(2)))} # No valid livestatus_uri => exiting if backend_definition is None: raise Exception('livestatus_uri field is invalid (%s). Expected: unix:/path/to/live or tcp:host:port' % livestatus_uri) # Updating backend_definition with current value backend_definition['name'] = section backend_definition['fields'] = fields_to_retrieve for key, value in section_values.items(): backend_definition[key] = value self.backends.append(backend_definition) def parse_options(self): parser = argparse.ArgumentParser() parser.add_argument('--host', nargs=1) parser.add_argument('--list', action='store_true') parser.add_argument('--pretty', action='store_true') self.options = parser.parse_args() def add_host(self, hostname, group): if group not in self.result: self.result[group] = {} self.result[group]['hosts'] = [] if hostname not in self.result[group]['hosts']: self.result[group]['hosts'].append(hostname) def query_backend(self, backend, host=None): '''Query a livestatus backend''' hosts_request = Socket(backend['connection']).hosts.columns(backend['host_field'], backend['group_field']) if backend['host_filter'] is not None: hosts_request = hosts_request.filter(backend['host_filter']) if host is not None: hosts_request = hosts_request.filter('name = ' + host[0]) hosts_request._columns += backend['fields'] hosts = hosts_request.call() for host in hosts: hostname = host[backend['host_field']] hostgroups = host[backend['group_field']] if not isinstance(hostgroups, list): hostgroups = [hostgroups] self.add_host(hostname, 'all') self.add_host(hostname, backend['name']) for group in hostgroups: self.add_host(hostname, group) for field in backend['fields']: var_name = backend['var_prefix'] + field if hostname not in self.result['_meta']['hostvars']: self.result['_meta']['hostvars'][hostname] = {} self.result['_meta']['hostvars'][hostname][var_name] = host[field] def __init__(self): self.defaultgroup = 'group_all' self.default_fields_to_retrieve = ('address', 'alias', 'display_name', 'childs', 'parents') self.backends = [] self.options = None self.parse_ini_file() self.parse_options() self.result = {} self.result['_meta'] = {} 
self.result['_meta']['hostvars'] = {} self.json_indent = None if self.options.pretty: self.json_indent = 2 if len(self.backends) == 0: sys.exit("Error: Livestatus configuration is missing. See nagios_livestatus.ini.") for backend in self.backends: self.query_backend(backend, self.options.host) if self.options.host: print(json.dumps(self.result['_meta']['hostvars'][self.options.host[0]], indent=self.json_indent)) elif self.options.list: print(json.dumps(self.result, indent=self.json_indent)) else: sys.exit("usage: --list or --host HOSTNAME [--pretty]") NagiosLivestatusInventory() ansible-2.5.1/contrib/inventory/nagios_ndo.ini0000644000000000000000000000057213265756155021433 0ustar rootroot00000000000000# Ansible Nagios external inventory script settings # [ndo] # NDO database URI # Make sure that data is returned as strings and not bytes if using python 3. # See http://docs.sqlalchemy.org/en/rel_1_0/core/engines.html # for supported databases and URI format. # Example for mysqlclient module : database_uri=mysql+mysqldb://user:passwd@hostname/ndo?charset=utf8&use_unicode=1 ansible-2.5.1/contrib/inventory/nagios_ndo.py0000755000000000000000000000733713265756155021315 0ustar rootroot00000000000000#!/usr/bin/env python # (c) 2014, Jonathan Lestrelin # # This file is part of Ansible, # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . """ Nagios NDO external inventory script. ======================================== Returns hosts and hostgroups from Nagios NDO. Configuration is read from `nagios_ndo.ini`. """ import os import argparse import sys try: import configparser except ImportError: import ConfigParser configparser = ConfigParser import json try: from sqlalchemy import text from sqlalchemy.engine import create_engine except ImportError: sys.exit("Error: SQLAlchemy is needed. 
Try something like: pip install sqlalchemy") class NagiosNDOInventory(object): def read_settings(self): config = configparser.SafeConfigParser() config.read(os.path.dirname(os.path.realpath(__file__)) + '/nagios_ndo.ini') if config.has_option('ndo', 'database_uri'): self.ndo_database_uri = config.get('ndo', 'database_uri') def read_cli(self): parser = argparse.ArgumentParser() parser.add_argument('--host', nargs=1) parser.add_argument('--list', action='store_true') self.options = parser.parse_args() def get_hosts(self): engine = create_engine(self.ndo_database_uri) connection = engine.connect() select_hosts = text("SELECT display_name \ FROM nagios_hosts") select_hostgroups = text("SELECT alias \ FROM nagios_hostgroups") select_hostgroup_hosts = text("SELECT h.display_name \ FROM nagios_hostgroup_members hgm, nagios_hosts h, nagios_hostgroups hg \ WHERE hgm.hostgroup_id = hg.hostgroup_id \ AND hgm.host_object_id = h.host_object_id \ AND hg.alias =:hostgroup_alias") hosts = connection.execute(select_hosts) self.result['all']['hosts'] = [host['display_name'] for host in hosts] for hostgroup in connection.execute(select_hostgroups): hostgroup_alias = hostgroup['alias'] self.result[hostgroup_alias] = {} hosts = connection.execute(select_hostgroup_hosts, hostgroup_alias=hostgroup_alias) self.result[hostgroup_alias]['hosts'] = [host['display_name'] for host in hosts] def __init__(self): self.defaultgroup = 'group_all' self.ndo_database_uri = None self.options = None self.read_settings() self.read_cli() self.result = {} self.result['all'] = {} self.result['all']['hosts'] = [] self.result['_meta'] = {} self.result['_meta']['hostvars'] = {} if self.ndo_database_uri: self.get_hosts() if self.options.host: print(json.dumps({})) elif self.options.list: print(json.dumps(self.result)) else: sys.exit("usage: --list or --host HOSTNAME") else: sys.exit("Error: Database configuration is missing. 
See nagios_ndo.ini.") NagiosNDOInventory() ansible-2.5.1/contrib/inventory/nova.ini0000644000000000000000000000225213265756155020253 0ustar rootroot00000000000000# Ansible OpenStack external inventory script # DEPRECATED: please use openstack.py inventory which is configured for # auth using the os-client-config library and either clouds.yaml or standard # openstack environment variables [openstack] #------------------------------------------------------------------------- # Required settings #------------------------------------------------------------------------- # API version version = 2 # OpenStack nova username username = # OpenStack nova api_key or password api_key = # OpenStack nova auth_url auth_url = # OpenStack nova project_id or tenant name project_id = #------------------------------------------------------------------------- # Optional settings #------------------------------------------------------------------------- # Authentication system # auth_system = keystone # Serverarm region name to use # region_name = # Specify a preference for public or private IPs (public is default) # prefer_private = False # What service type (required for newer nova client) # service_type = compute # TODO: Some other options # insecure = # endpoint_type = # extensions = # service_name = ansible-2.5.1/contrib/inventory/nova.py0000755000000000000000000001556513265756155020142 0ustar rootroot00000000000000#!/usr/bin/env python # (c) 2012, Marco Vito Moscaritolo # # This file is part of Ansible, # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . # WARNING: This file is deprecated. New work should focus on the openstack.py # inventory module, which properly handles multiple clouds as well as keystone # v3 and keystone auth plugins import sys import re import os import ConfigParser from novaclient import client as nova_client from six import iteritems, itervalues try: import json except ImportError: import simplejson as json sys.stderr.write("WARNING: this inventory module is deprecated. 
please migrate usage to openstack.py\n") ################################################### # executed with no parameters, return the list of # all groups and hosts NOVA_CONFIG_FILES = [os.getcwd() + "/nova.ini", os.path.expanduser(os.environ.get('ANSIBLE_CONFIG', "~/nova.ini")), "/etc/ansible/nova.ini"] NOVA_DEFAULTS = { 'auth_system': None, 'region_name': None, 'service_type': 'compute', } def nova_load_config_file(): p = ConfigParser.SafeConfigParser(NOVA_DEFAULTS) for path in NOVA_CONFIG_FILES: if os.path.exists(path): p.read(path) return p return None def get_fallback(config, value, section="openstack"): """ Get value from config object and return the value or False """ try: return config.get(section, value) except ConfigParser.NoOptionError: return False def push(data, key, element): """ Push an element onto a list kept in a dictionary of lists """ if (not element) or (not key): return if key in data: data[key].append(element) else: data[key] = [element] def to_safe(word): ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups ''' return re.sub(r"[^A-Za-z0-9\-]", "_", word) def get_ips(server, access_ip=True): """ Returns a list of the server's IPs, or the preferred access IP """ private = [] public = [] address_list = [] # Iterate through each server's network(s), get addresses and get type addresses = getattr(server, 'addresses', {}) if len(addresses) > 0: for network in itervalues(addresses): for address in network: if address.get('OS-EXT-IPS:type', False) == 'fixed': private.append(address['addr']) elif address.get('OS-EXT-IPS:type', False) == 'floating': public.append(address['addr']) if not access_ip: address_list.append(server.accessIPv4) address_list.extend(private) address_list.extend(public) return address_list access_ip = None # Pick the preferred access IP, honoring prefer_private when set if server.accessIPv4: access_ip = server.accessIPv4 if (not access_ip) and public and not (private and prefer_private): access_ip = public[0] if private and not access_ip: access_ip = private[0] return access_ip def get_metadata(server): """Returns dictionary of all host metadata""" get_ips(server, False) results = {} for key in vars(server): # Extract value value = getattr(server, key) # Generate sanitized key key = 'os_' + re.sub(r"[^A-Za-z0-9\-]", "_", key).lower() # Add value to instance result (exclude manager class) # TODO: maybe use value.__class__ or similar inside of key_name if key != 'os_manager': results[key] = value return results config = nova_load_config_file() if not config: sys.exit('Unable to find config file in %s' % ', '.join(NOVA_CONFIG_FILES)) # Load up connection info based on config and then environment # variables username = (get_fallback(config, 'username') or os.environ.get('OS_USERNAME', None)) api_key = (get_fallback(config, 'api_key') or os.environ.get('OS_PASSWORD', None)) auth_url = (get_fallback(config, 'auth_url') or os.environ.get('OS_AUTH_URL', None)) project_id = (get_fallback(config, 'project_id') or os.environ.get('OS_TENANT_NAME', None)) region_name = (get_fallback(config, 'region_name') or os.environ.get('OS_REGION_NAME', None)) auth_system = (get_fallback(config, 'auth_system') or os.environ.get('OS_AUTH_SYSTEM', None)) # Determine what type of IP is preferred to return prefer_private = False try: prefer_private = config.getboolean('openstack', 'prefer_private') except ConfigParser.NoOptionError: pass client = nova_client.Client( version=config.get('openstack', 'version'), username=username, api_key=api_key, auth_url=auth_url, region_name=region_name, 
project_id=project_id, auth_system=auth_system, service_type=config.get('openstack', 'service_type'), ) # Default or added list option if (len(sys.argv) == 2 and sys.argv[1] == '--list') or len(sys.argv) == 1: groups = {'_meta': {'hostvars': {}}} # Cycle on servers for server in client.servers.list(): access_ip = get_ips(server) # Push to name group of 1 push(groups, server.name, access_ip) # Run through each metadata item and add instance to it for key, value in iteritems(server.metadata): composed_key = to_safe('tag_{0}_{1}'.format(key, value)) push(groups, composed_key, access_ip) # Do special handling of group for backwards compat # inventory groups group = server.metadata['group'] if 'group' in server.metadata else 'undefined' push(groups, group, access_ip) # Add vars to _meta key for performance optimization in # Ansible 1.3+ groups['_meta']['hostvars'][access_ip] = get_metadata(server) # Return server list print(json.dumps(groups, sort_keys=True, indent=2)) sys.exit(0) ##################################################### # executed with a hostname as a parameter, return the # variables for that host elif len(sys.argv) == 3 and (sys.argv[1] == '--host'): results = {} ips = [] for server in client.servers.list(): if sys.argv[2] in (get_ips(server) or []): results = get_metadata(server) print(json.dumps(results, sort_keys=True, indent=2)) sys.exit(0) else: sys.exit("usage: --list ..OR.. --host ") ansible-2.5.1/contrib/inventory/nsot.py0000755000000000000000000002314113265756155020147 0ustar rootroot00000000000000#!/usr/bin/env python ''' nsot ==== Ansible Dynamic Inventory to pull hosts from NSoT, a flexible CMDB by Dropbox Features -------- * Define host groups in the form of NSoT device attribute criteria * All parameters defined by the spec as of 2015-09-05 are supported. + ``--list``: Returns JSON hash of host groups -> hosts and top-level ``_meta`` -> ``hostvars`` which correspond to all device attributes. Group vars can be specified in the YAML configuration, noted below. + ``--host ``: Returns JSON hash where every item is a device attribute. * In addition to all attributes assigned to the resource being returned, the script will also append ``site_id`` and ``id`` as facts to utilize. Configuration ------------- Since it'd be annoying and failure-prone to guess where your configuration file is, use ``NSOT_INVENTORY_CONFIG`` to specify the path to it. This file should adhere to the YAML spec. Each top-level variable must be a desired Ansible group name, hashed with a single 'query' item to define the NSoT attribute query. Queries follow the normal NSoT query syntax, `shown here`_ .. _shown here: https://github.com/dropbox/pynsot#set-queries .. code:: yaml routers: query: 'deviceType=ROUTER' vars: a: b c: d juniper_fw: query: 'deviceType=FIREWALL manufacturer=JUNIPER' not_f10: query: '-manufacturer=FORCE10' The inventory will automatically use your ``.pynsotrc`` like normal pynsot from the CLI would, so make sure that's configured appropriately. .. note:: Attributes I'm showing above are influenced by ones that the Trigger project likes. As is the spirit of NSoT, use whichever attributes work best for your workflow. If config file is blank or absent, the following default groups will be created: * ``routers``: deviceType=ROUTER * ``switches``: deviceType=SWITCH * ``firewalls``: deviceType=FIREWALL These are likely not useful for everyone so please use the configuration. :) .. note:: By default, resources will only be returned for the default site configured in your ``~/.pynsotrc``. 
If you want to specify, add an extra key under the group for ``site: n``. Output Examples --------------- Here are some examples shown from just calling the command directly:: $ NSOT_INVENTORY_CONFIG=$PWD/test.yaml ansible_nsot --list | jq '.' { "routers": { "hosts": [ "test1.example.com" ], "vars": { "cool_level": "very", "group": "routers" } }, "firewalls": { "hosts": [ "test2.example.com" ], "vars": { "cool_level": "enough", "group": "firewalls" } }, "_meta": { "hostvars": { "test2.example.com": { "make": "SRX", "site_id": 1, "id": 108 }, "test1.example.com": { "make": "MX80", "site_id": 1, "id": 107 } } }, "rtr_and_fw": { "hosts": [ "test1.example.com", "test2.example.com" ], "vars": {} } } $ NSOT_INVENTORY_CONFIG=$PWD/test.yaml ansible_nsot --host test1 | jq '.' { "make": "MX80", "site_id": 1, "id": 107 } ''' from __future__ import print_function import sys import os import pkg_resources import argparse import json import yaml from textwrap import dedent from pynsot.client import get_api_client from pynsot.app import HttpServerError from click.exceptions import UsageError from six import string_types def warning(*objs): print("WARNING: ", *objs, file=sys.stderr) class NSoTInventory(object): '''NSoT Client object for gather inventory''' def __init__(self): self.config = dict() config_env = os.environ.get('NSOT_INVENTORY_CONFIG') if config_env: try: config_file = os.path.abspath(config_env) except IOError: # If file non-existent, use default config self._config_default() except Exception as e: sys.exit('%s\n' % e) with open(config_file) as f: try: self.config.update(yaml.safe_load(f)) except TypeError: # If empty file, use default config warning('Empty config file') self._config_default() except Exception as e: sys.exit('%s\n' % e) else: # Use defaults if env var missing self._config_default() self.groups = self.config.keys() self.client = get_api_client() self._meta = {'hostvars': dict()} def _config_default(self): default_yaml = ''' --- routers: query: deviceType=ROUTER switches: query: deviceType=SWITCH firewalls: query: deviceType=FIREWALL ''' self.config = yaml.safe_load(dedent(default_yaml)) def do_list(self): '''Direct callback for when ``--list`` is provided Relies on the configuration generated from init to run _inventory_group() ''' inventory = dict() for group, contents in self.config.items(): group_response = self._inventory_group(group, contents) inventory.update(group_response) inventory.update({'_meta': self._meta}) return json.dumps(inventory) def do_host(self, host): return json.dumps(self._hostvars(host)) def _hostvars(self, host): '''Return dictionary of all device attributes Depending on number of devices in NSoT, could be rather slow since this has to request every device resource to filter through ''' device = [i for i in self.client.devices.get() if host in i['hostname']][0] attributes = device['attributes'] attributes.update({'site_id': device['site_id'], 'id': device['id']}) return attributes def _inventory_group(self, group, contents): '''Takes a group and returns inventory for it as dict :param group: Group name :type group: str :param contents: The contents of the group's YAML config :type contents: dict contents param should look like:: { 'query': 'xx', 'vars': 'a': 'b' } Will return something like:: { group: { hosts: [], vars: {}, } ''' query = contents.get('query') hostvars = contents.get('vars', dict()) site = contents.get('site', dict()) obj = {group: dict()} obj[group]['hosts'] = [] obj[group]['vars'] = hostvars try: assert isinstance(query, 
string_types) except: sys.exit('ERR: Group queries must be a single string\n' ' Group: %s\n' ' Query: %s\n' % (group, query) ) try: if site: site = self.client.sites(site) devices = site.devices.query.get(query=query) else: devices = self.client.devices.query.get(query=query) except HttpServerError as e: if '500' in str(e.response): _site = 'Correct site id?' _attr = 'Queried attributes actually exist?' questions = _site + '\n' + _attr sys.exit('ERR: 500 from server.\n%s' % questions) else: raise except UsageError: sys.exit('ERR: Could not connect to server. Running?') # Would do a list comprehension here, but would like to save code/time # and also acquire attributes in this step for host in devices: # Iterate through each device that matches query, assign hostname # to the group's hosts array and then use this single iteration as # a chance to update self._meta which will be used in the final # return hostname = host['hostname'] obj[group]['hosts'].append(hostname) attributes = host['attributes'] attributes.update({'site_id': host['site_id'], 'id': host['id']}) self._meta['hostvars'].update({hostname: attributes}) return obj def parse_args(): desc = __doc__.splitlines()[4] # Just to avoid being redundant # Establish parser with options and error out if no action provided parser = argparse.ArgumentParser( description=desc, conflict_handler='resolve', ) # Arguments # # Currently accepting (--list | -l) and (--host | -h) # These must not be allowed together parser.add_argument( '--list', '-l', help='Print JSON object containing hosts to STDOUT', action='store_true', dest='list_', # Avoiding syntax highlighting for list ) parser.add_argument( '--host', '-h', help='Print JSON object containing hostvars for ', action='store', ) args = parser.parse_args() if not args.list_ and not args.host: # Require at least one option parser.exit(status=1, message='No action requested') if args.list_ and args.host: # Do not allow multiple options parser.exit(status=1, message='Too many actions requested') return args def main(): '''Set up argument handling and callback routing''' args = parse_args() client = NSoTInventory() # Callback condition if args.list_: print(client.do_list()) elif args.host: print(client.do_host(args.host)) if __name__ == '__main__': main() ansible-2.5.1/contrib/inventory/nsot.yaml0000644000000000000000000000065313265756155020461 0ustar rootroot00000000000000--- juniper_routers: query: 'deviceType=ROUTER manufacturer=JUNIPER' vars: group: juniper_routers netconf: true os: junos cisco_asa: query: 'manufacturer=CISCO deviceType=FIREWALL' vars: group: cisco_asa routed_vpn: false stateful: true old_cisco_asa: query: 'manufacturer=CISCO deviceType=FIREWALL -softwareVersion=8.3+' vars: old_nat: true not_f10: query: '-manufacturer=FORCE10' ansible-2.5.1/contrib/inventory/openshift.py0000755000000000000000000000631213265756155021164 0ustar rootroot00000000000000#!/usr/bin/env python # (c) 2013, Michael Scherer # # This file is part of Ansible, # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. 
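#
# nsot.py above enforces "--list xor --host" by hand with two parser.exit()
# checks after parsing. argparse can express the same constraint
# declaratively via a mutually exclusive group, which is the approach
# openstack.py later in this directory takes. A small sketch (the argument
# names mirror nsot.py; everything else is illustrative):

import argparse

parser = argparse.ArgumentParser(description='Exclusive inventory actions')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--list', action='store_true', dest='list_')
group.add_argument('--host', action='store')

# argparse now rejects both flags together, and neither flag at all,
# without any manual post-parse checks.
args = parser.parse_args(['--list'])
assert args.list_ is True and args.host is None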
# # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . DOCUMENTATION = ''' --- inventory: openshift short_description: Openshift gears external inventory script description: - Generates inventory of Openshift gears using the REST interface - this permit to reuse playbook to setup an Openshift gear version_added: None author: Michael Scherer ''' try: import json except ImportError: import simplejson as json import os import os.path import sys import ConfigParser import StringIO from ansible.module_utils.urls import open_url configparser = None def get_from_rhc_config(variable): global configparser CONF_FILE = os.path.expanduser('~/.openshift/express.conf') if os.path.exists(CONF_FILE): if not configparser: ini_str = '[root]\n' + open(CONF_FILE, 'r').read() configparser = ConfigParser.SafeConfigParser() configparser.readfp(StringIO.StringIO(ini_str)) try: return configparser.get('root', variable) except ConfigParser.NoOptionError: return None def get_config(env_var, config_var): result = os.getenv(env_var) if not result: result = get_from_rhc_config(config_var) if not result: sys.exit("failed=True msg='missing %s'" % env_var) return result def get_json_from_api(url, username, password): headers = {'Accept': 'application/json; version=1.5'} response = open_url(url, headers=headers, url_username=username, url_password=password) return json.loads(response.read())['data'] username = get_config('ANSIBLE_OPENSHIFT_USERNAME', 'default_rhlogin') password = get_config('ANSIBLE_OPENSHIFT_PASSWORD', 'password') broker_url = 'https://%s/broker/rest/' % get_config('ANSIBLE_OPENSHIFT_BROKER', 'libra_server') response = get_json_from_api(broker_url + '/domains', username, password) response = get_json_from_api("%s/domains/%s/applications" % (broker_url, response[0]['id']), username, password) result = {} for app in response: # ssh://520311404832ce3e570000ff@blog-johndoe.example.org (user, host) = app['ssh_url'][6:].split('@') app_name = host.split('-')[0] result[app_name] = {} result[app_name]['hosts'] = [] result[app_name]['hosts'].append(host) result[app_name]['vars'] = {} result[app_name]['vars']['ansible_ssh_user'] = user if len(sys.argv) == 2 and sys.argv[1] == '--list': print(json.dumps(result)) elif len(sys.argv) == 3 and sys.argv[1] == '--host': print(json.dumps({})) else: print("Need an argument, either --list or --host ") ansible-2.5.1/contrib/inventory/openstack.py0000755000000000000000000002305313265756155021155 0ustar rootroot00000000000000#!/usr/bin/env python # Copyright (c) 2012, Marco Vito Moscaritolo # Copyright (c) 2013, Jesse Keating # Copyright (c) 2015, Hewlett-Packard Development Company, L.P. # Copyright (c) 2016, Rackspace Australia # # This module is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this software. If not, see . # The OpenStack Inventory module uses os-client-config for configuration. 
# https://github.com/openstack/os-client-config # This means it will either: # - Respect normal OS_* environment variables like other OpenStack tools # - Read values from a clouds.yaml file. # If you want to configure via clouds.yaml, you can put the file in: # - Current directory # - ~/.config/openstack/clouds.yaml # - /etc/openstack/clouds.yaml # - /etc/ansible/openstack.yml # The clouds.yaml file can contain entries for multiple clouds and multiple # regions of those clouds. If it does, this inventory module will by default # connect to all of them and present them as one contiguous inventory. You # can limit to one cloud by passing the `--cloud` parameter, or use the # OS_CLOUD environment variable. If caching is enabled, and a cloud is # selected, then per-cloud cache folders will be used. # # See the adjacent openstack.yml file for an example config file # There are two ansible inventory specific options that can be set in # the inventory section. # expand_hostvars controls whether or not the inventory will make extra API # calls to fill out additional information about each server # use_hostnames changes the behavior from registering every host with its UUID # and making a group of its hostname to only doing this if the # hostname in question has more than one server # fail_on_errors causes the inventory to fail and return no hosts if one cloud # has failed (for example, bad credentials or being offline). # When set to False, the inventory will return hosts from # whichever other clouds it can contact. (Default: True) # # Also it is possible to pass the correct user by setting an ansible_user: $myuser # metadata attribute. import argparse import collections import os import sys import time from distutils.version import StrictVersion try: import json except: import simplejson as json import os_client_config import shade import shade.inventory CONFIG_FILES = ['/etc/ansible/openstack.yaml', '/etc/ansible/openstack.yml'] def get_groups_from_server(server_vars, namegroup=True): groups = [] region = server_vars['region'] cloud = server_vars['cloud'] metadata = server_vars.get('metadata', {}) # Create a group for the cloud groups.append(cloud) # Create a group on region groups.append(region) # And one by cloud_region groups.append("%s_%s" % (cloud, region)) # Check if group metadata key in servers' metadata if 'group' in metadata: groups.append(metadata['group']) for extra_group in metadata.get('groups', '').split(','): if extra_group: groups.append(extra_group.strip()) groups.append('instance-%s' % server_vars['id']) if namegroup: groups.append(server_vars['name']) for key in ('flavor', 'image'): if 'name' in server_vars[key]: groups.append('%s-%s' % (key, server_vars[key]['name'])) for key, value in iter(metadata.items()): groups.append('meta-%s_%s' % (key, value)) az = server_vars.get('az', None) if az: # Make groups for az, region_az and cloud_region_az groups.append(az) groups.append('%s_%s' % (region, az)) groups.append('%s_%s_%s' % (cloud, region, az)) return groups def get_host_groups(inventory, refresh=False, cloud=None): (cache_file, cache_expiration_time) = get_cache_settings(cloud) if is_cache_stale(cache_file, cache_expiration_time, refresh=refresh): groups = to_json(get_host_groups_from_cloud(inventory)) with open(cache_file, 'w') as f: f.write(groups) else: with open(cache_file, 'r') as f: groups = f.read() return groups def append_hostvars(hostvars, groups, key, server, namegroup=False): hostvars[key] = dict( ansible_ssh_host=server['interface_ip'], 
ansible_host=server['interface_ip'], openstack=server) metadata = server.get('metadata', {}) if 'ansible_user' in metadata: hostvars[key]['ansible_user'] = metadata['ansible_user'] for group in get_groups_from_server(server, namegroup=namegroup): groups[group].append(key) def get_host_groups_from_cloud(inventory): groups = collections.defaultdict(list) firstpass = collections.defaultdict(list) hostvars = {} list_args = {} if hasattr(inventory, 'extra_config'): use_hostnames = inventory.extra_config['use_hostnames'] list_args['expand'] = inventory.extra_config['expand_hostvars'] if StrictVersion(shade.__version__) >= StrictVersion("1.6.0"): list_args['fail_on_cloud_config'] = \ inventory.extra_config['fail_on_errors'] else: use_hostnames = False for server in inventory.list_hosts(**list_args): if 'interface_ip' not in server: continue firstpass[server['name']].append(server) for name, servers in firstpass.items(): if len(servers) == 1 and use_hostnames: append_hostvars(hostvars, groups, name, servers[0]) else: server_ids = set() # Trap for duplicate results for server in servers: server_ids.add(server['id']) if len(server_ids) == 1 and use_hostnames: append_hostvars(hostvars, groups, name, servers[0]) else: for server in servers: append_hostvars( hostvars, groups, server['id'], server, namegroup=True) groups['_meta'] = {'hostvars': hostvars} return groups def is_cache_stale(cache_file, cache_expiration_time, refresh=False): ''' Determines if cache file has expired, or if it is still valid ''' if refresh: return True if os.path.isfile(cache_file) and os.path.getsize(cache_file) > 0: mod_time = os.path.getmtime(cache_file) current_time = time.time() if (mod_time + cache_expiration_time) > current_time: return False return True def get_cache_settings(cloud=None): config = os_client_config.config.OpenStackConfig( config_files=os_client_config.config.CONFIG_FILES + CONFIG_FILES) # For inventory-wide caching cache_expiration_time = config.get_cache_expiration_time() cache_path = config.get_cache_path() if cloud: cache_path = '{0}_{1}'.format(cache_path, cloud) if not os.path.exists(cache_path): os.makedirs(cache_path) cache_file = os.path.join(cache_path, 'ansible-inventory.cache') return (cache_file, cache_expiration_time) def to_json(in_dict): return json.dumps(in_dict, sort_keys=True, indent=2) def parse_args(): parser = argparse.ArgumentParser(description='OpenStack Inventory Module') parser.add_argument('--cloud', default=os.environ.get('OS_CLOUD'), help='Cloud name (default: None') parser.add_argument('--private', action='store_true', help='Use private address for ansible host') parser.add_argument('--refresh', action='store_true', help='Refresh cached information') parser.add_argument('--debug', action='store_true', default=False, help='Enable debug output') group = parser.add_mutually_exclusive_group(required=True) group.add_argument('--list', action='store_true', help='List active servers') group.add_argument('--host', help='List details about the specific host') return parser.parse_args() def main(): args = parse_args() try: config_files = os_client_config.config.CONFIG_FILES + CONFIG_FILES shade.simple_logging(debug=args.debug) inventory_args = dict( refresh=args.refresh, config_files=config_files, private=args.private, cloud=args.cloud, ) if hasattr(shade.inventory.OpenStackInventory, 'extra_config'): inventory_args.update(dict( config_key='ansible', config_defaults={ 'use_hostnames': False, 'expand_hostvars': True, 'fail_on_errors': True, } )) inventory = 
shade.inventory.OpenStackInventory(**inventory_args) if args.list: output = get_host_groups(inventory, refresh=args.refresh, cloud=args.cloud) elif args.host: output = to_json(inventory.get_host(args.host)) print(output) except shade.OpenStackCloudException as e: sys.stderr.write('%s\n' % e.message) sys.exit(1) sys.exit(0) if __name__ == '__main__': main() ansible-2.5.1/contrib/inventory/openstack.yml0000644000000000000000000000107013265756155021316 0ustar rootroot00000000000000clouds: vexxhost: profile: vexxhost auth: project_name: 39e296b2-fc96-42bf-8091-cb742fa13da9 username: fb886a9b-c37b-442a-9be3-964bed961e04 password: fantastic-password1 rax: cloud: rackspace auth: username: example password: spectacular-password project_id: 2352426 region_name: DFW,ORD,IAD devstack: auth: auth_url: https://devstack.example.com username: stack password: stack project_name: stack ansible: use_hostnames: True expand_hostvars: False fail_on_errors: True ansible-2.5.1/contrib/inventory/openvz.py0000755000000000000000000000540213265756155020505 0ustar rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # # openvz.py # # Copyright 2014 jordonr # # This file is part of Ansible. # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
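#
# linode.py and openstack.py above both gate their API calls on the same
# freshness test: a cache file is still valid while its mtime plus the
# configured maximum age lies in the future. The check in isolation (a
# sketch; the path and TTL below are illustrative):

import os
import time

def cache_is_fresh(path, max_age_seconds):
    """Return True when path exists, is non-empty, and is young enough."""
    if not os.path.isfile(path) or os.path.getsize(path) == 0:
        return False
    return (os.path.getmtime(path) + max_age_seconds) > time.time()

# e.g. cache_is_fresh('/tmp/ansible-linode.cache', 300) mirrors the
# cache_path/cache_max_age pair from linode.ini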
# # Inspired by libvirt_lxc.py inventory script # https://github.com/ansible/ansible/blob/e5ef0eca03cbb6c8950c06dc50d0ca22aa8902f4/plugins/inventory/libvirt_lxc.py # # Groups are determined by the description field of openvz guests # multiple groups can be separated by commas: webserver,dbserver from subprocess import Popen, PIPE import sys import json # List openvz hosts vzhosts = ['vzhost1', 'vzhost2', 'vzhost3'] # Add openvz hosts to the inventory and Add "_meta" trick inventory = {'vzhosts': {'hosts': vzhosts}, '_meta': {'hostvars': {}}} # default group, when description not defined default_group = ['vzguest'] def get_guests(): # Loop through vzhosts for h in vzhosts: # SSH to vzhost and get the list of guests in json pipe = Popen(['ssh', h, 'vzlist', '-j'], stdout=PIPE, universal_newlines=True) # Load Json info of guests json_data = json.loads(pipe.stdout.read()) # loop through guests for j in json_data: # Add information to host vars inventory['_meta']['hostvars'][j['hostname']] = { 'ctid': j['ctid'], 'veid': j['veid'], 'vpsid': j['vpsid'], 'private_path': j['private'], 'root_path': j['root'], 'ip': j['ip'] } # determine group from guest description if j['description'] is not None: groups = j['description'].split(",") else: groups = default_group # add guest to inventory for g in groups: if g not in inventory: inventory[g] = {'hosts': []} inventory[g]['hosts'].append(j['hostname']) return inventory if len(sys.argv) == 2 and sys.argv[1] == '--list': inv_json = get_guests() print(json.dumps(inv_json, sort_keys=True)) elif len(sys.argv) == 3 and sys.argv[1] == '--host': print(json.dumps({})) else: print("Need an argument, either --list or --host ") ansible-2.5.1/contrib/inventory/ovirt.ini0000644000000000000000000000250213265756155020451 0ustar rootroot00000000000000# Copyright 2013 Google Inc. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . # Author: Josha Inglis based on the gce.ini by Eric Johnson [ovirt] # ovirt Service Account configuration information can be stored in the # libcloud 'secrets.py' file. Ideally, the 'secrets.py' file will already # exist in your PYTHONPATH and be picked up automatically with an import # statement in the inventory script. However, you can specify an absolute # path to the secrets.py file with 'libcloud_secrets' parameter. ovirt_api_secrets = # If you are not going to use a 'secrets.py' file, you can set the necessary # authorization parameters here. ovirt_url = ovirt_username = ovirt_password = ovirt_ca_file = ansible-2.5.1/contrib/inventory/ovirt.py0000755000000000000000000002326513265756155020336 0ustar rootroot00000000000000#!/usr/bin/env python # Copyright 2015 IIX Inc. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. 
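#
# openvz.py above derives group membership from each guest's free-form
# description field, splitting on commas ("webserver,dbserver") and falling
# back to a default group when the field is empty. The same convention in
# isolation, with a hypothetical vzlist record:

guest = {'hostname': 'ct101.example.com', 'description': 'webserver,dbserver'}
groups = guest['description'].split(',') if guest['description'] else ['vzguest']
inventory = {}
for g in groups:
    # Create each group on first sight, then append the guest to it
    inventory.setdefault(g, {'hosts': []})['hosts'].append(guest['hostname'])
assert sorted(inventory) == ['dbserver', 'webserver']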
# # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . """ ovirt external inventory script ================================= Generates inventory that Ansible can understand by making API requests to oVirt via the ovirt-engine-sdk-python library. When run against a specific host, this script returns the following variables based on the data obtained from the ovirt_sdk Node object: - ovirt_uuid - ovirt_id - ovirt_image - ovirt_machine_type - ovirt_ips - ovirt_name - ovirt_description - ovirt_status - ovirt_zone - ovirt_tags - ovirt_stats When run in --list mode, instances are grouped by the following categories: - zone: zone group name. - instance tags: An entry is created for each tag. For example, if you have two instances with a common tag called 'foo', they will both be grouped together under the 'tag_foo' name. - network name: the name of the network is appended to 'network_' (e.g. the 'default' network will result in a group named 'network_default') - running status: group name prefixed with 'status_' (e.g. status_up, status_down,..) Examples: Execute uname on all instances in the us-central1-a zone $ ansible -i ovirt.py us-central1-a -m shell -a "/bin/uname -a" Use the ovirt inventory script to print out instance specific information $ contrib/inventory/ovirt.py --host my_instance Author: Josha Inglis based on the gce.py by Eric Johnson Version: 0.0.1 """ USER_AGENT_PRODUCT = "Ansible-ovirt_inventory_plugin" USER_AGENT_VERSION = "v1" import sys import os import argparse import ConfigParser from collections import defaultdict try: import json except ImportError: # noinspection PyUnresolvedReferences,PyPackageRequirements import simplejson as json try: # noinspection PyUnresolvedReferences from ovirtsdk.api import API # noinspection PyUnresolvedReferences from ovirtsdk.xml import params except ImportError: print("ovirt inventory script requires ovirt-engine-sdk-python") sys.exit(1) class OVirtInventory(object): def __init__(self): # Read settings and parse CLI arguments self.args = self.parse_cli_args() self.driver = self.get_ovirt_driver() # Just display data for specific host if self.args.host: print(self.json_format_dict( self.node_to_dict(self.get_instance(self.args.host)), pretty=self.args.pretty )) sys.exit(0) # Otherwise, assume user wants all instances grouped print( self.json_format_dict( data=self.group_instances(), pretty=self.args.pretty ) ) sys.exit(0) @staticmethod def get_ovirt_driver(): """ Determine the ovirt authorization settings and return a ovirt_sdk driver. :rtype : ovirtsdk.api.API """ kwargs = {} ovirt_ini_default_path = os.path.join( os.path.dirname(os.path.realpath(__file__)), "ovirt.ini") ovirt_ini_path = os.environ.get('OVIRT_INI_PATH', ovirt_ini_default_path) # Create a ConfigParser. # This provides empty defaults to each key, so that environment # variable configuration (as opposed to INI configuration) is able # to work. config = ConfigParser.SafeConfigParser(defaults={ 'ovirt_url': '', 'ovirt_username': '', 'ovirt_password': '', 'ovirt_api_secrets': '', }) if 'ovirt' not in config.sections(): config.add_section('ovirt') config.read(ovirt_ini_path) # Attempt to get ovirt params from a configuration file, if one # exists. 
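        # Summary of the lookup order implemented below: a secrets.py
        # already importable on sys.path wins first; failing that, a
        # secrets.py named by ovirt_api_secrets is loaded; otherwise the
        # plain ovirt_url/ovirt_username/ovirt_password ini values are
        # used. The OVIRT_URL/OVIRT_EMAIL/OVIRT_USERNAME/OVIRT_PASS/
        # OVIRT_PASSWORD environment variables override whatever was found.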
secrets_path = config.get('ovirt', 'ovirt_api_secrets') secrets_found = False try: # noinspection PyUnresolvedReferences,PyPackageRequirements import secrets kwargs = getattr(secrets, 'OVIRT_KEYWORD_PARAMS', {}) secrets_found = True except ImportError: pass if not secrets_found and secrets_path: if not secrets_path.endswith('secrets.py'): err = "Must specify ovirt_sdk secrets file as /absolute/path/to/secrets.py" print(err) sys.exit(1) sys.path.append(os.path.dirname(secrets_path)) try: # noinspection PyUnresolvedReferences,PyPackageRequirements import secrets kwargs = getattr(secrets, 'OVIRT_KEYWORD_PARAMS', {}) except ImportError: pass if not secrets_found: kwargs = { 'url': config.get('ovirt', 'ovirt_url'), 'username': config.get('ovirt', 'ovirt_username'), 'password': config.get('ovirt', 'ovirt_password'), } # If the appropriate environment variables are set, they override # other configuration; process those into our args and kwargs. kwargs['url'] = os.environ.get('OVIRT_URL', kwargs['url']) kwargs['username'] = next(val for val in [os.environ.get('OVIRT_EMAIL'), os.environ.get('OVIRT_USERNAME'), kwargs['username']] if val is not None) kwargs['password'] = next(val for val in [os.environ.get('OVIRT_PASS'), os.environ.get('OVIRT_PASSWORD'), kwargs['password']] if val is not None) # Retrieve and return the ovirt driver. return API(insecure=True, **kwargs) @staticmethod def parse_cli_args(): """ Command line argument processing :rtype : argparse.Namespace """ parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on ovirt') parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)') parser.add_argument('--host', action='store', help='Get all information about an instance') parser.add_argument('--pretty', action='store_true', default=False, help='Pretty format (default: False)') return parser.parse_args() def node_to_dict(self, inst): """ :type inst: params.VM """ if inst is None: return {} inst.get_custom_properties() ips = [ip.get_address() for ip in inst.get_guest_info().get_ips().get_ip()] \ if inst.get_guest_info() is not None else [] stats = {} for stat in inst.get_statistics().list(): stats[stat.get_name()] = stat.get_values().get_value()[0].get_datum() return { 'ovirt_uuid': inst.get_id(), 'ovirt_id': inst.get_id(), 'ovirt_image': inst.get_os().get_type(), 'ovirt_machine_type': self.get_machine_type(inst), 'ovirt_ips': ips, 'ovirt_name': inst.get_name(), 'ovirt_description': inst.get_description(), 'ovirt_status': inst.get_status().get_state(), 'ovirt_zone': inst.get_cluster().get_id(), 'ovirt_tags': self.get_tags(inst), 'ovirt_stats': stats, # Hosts don't have a public name, so we add an IP 'ansible_ssh_host': ips[0] if len(ips) > 0 else None } @staticmethod def get_tags(inst): """ :type inst: params.VM """ return [x.get_name() for x in inst.get_tags().list()] def get_machine_type(self, inst): inst_type = inst.get_instance_type() if inst_type: return self.driver.instancetypes.get(id=inst_type.id).name # noinspection PyBroadException,PyUnusedLocal def get_instance(self, instance_name): """Gets details about a specific instance """ try: return self.driver.vms.get(name=instance_name) except Exception as e: return None def group_instances(self): """Group all instances""" groups = defaultdict(list) meta = {"hostvars": {}} for node in self.driver.vms.list(): assert isinstance(node, params.VM) name = node.get_name() meta["hostvars"][name] = self.node_to_dict(node) zone = node.get_cluster().get_name() 
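            # The cluster name is what the module docstring calls the
            # "zone"; every VM is therefore grouped under its cluster.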
groups[zone].append(name) tags = self.get_tags(node) for t in tags: tag = 'tag_%s' % t groups[tag].append(name) nets = [x.get_name() for x in node.get_nics().list()] for net in nets: net = 'network_%s' % net groups[net].append(name) status = node.get_status().get_state() stat = 'status_%s' % status.lower() if stat in groups: groups[stat].append(name) else: groups[stat] = [name] groups["_meta"] = meta return groups @staticmethod def json_format_dict(data, pretty=False): """ Converts a dict to a JSON object and dumps it as a formatted string """ if pretty: return json.dumps(data, sort_keys=True, indent=2) else: return json.dumps(data) # Run the script OVirtInventory() ansible-2.5.1/contrib/inventory/ovirt4.py0000755000000000000000000001716513265756155020424 0ustar rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright (c) 2016 Red Hat, Inc. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . # """ oVirt dynamic inventory script ================================= Generates dynamic inventory file for oVirt. Script will return following attributes for each virtual machine: - id - name - host - cluster - status - description - fqdn - os_type - template - tags - statistics - devices When run in --list mode, virtual machines are grouped by the following categories: - cluster - tag - status Note: If there is some virtual machine which has has more tags it will be in both tag records. Examples: # Execute update of system on webserver virtual machine: $ ansible -i contrib/inventory/ovirt4.py webserver -m yum -a "name=* state=latest" # Get webserver virtual machine information: $ contrib/inventory/ovirt4.py --host webserver Author: Ondra Machacek (@machacekondra) """ import argparse import os import sys from collections import defaultdict try: import ConfigParser as configparser except ImportError: import configparser try: import json except ImportError: import simplejson as json try: import ovirtsdk4 as sdk import ovirtsdk4.types as otypes except ImportError: print('oVirt inventory script requires ovirt-engine-sdk-python >= 4.0.0') sys.exit(1) def parse_args(): """ Create command line parser for oVirt dynamic inventory script. """ parser = argparse.ArgumentParser( description='Ansible dynamic inventory script for oVirt.', ) parser.add_argument( '--list', action='store_true', default=True, help='Get data of all virtual machines (default: True).', ) parser.add_argument( '--host', help='Get data of virtual machines running on specified host.', ) parser.add_argument( '--pretty', action='store_true', default=False, help='Pretty format (default: False).', ) return parser.parse_args() def create_connection(): """ Create a connection to oVirt engine API. 
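    For illustration, a minimal ovirt.ini consuming the keys read below
    could look like this (placeholder values):

        [ovirt]
        ovirt_url = https://engine.example.com/ovirt-engine/api
        ovirt_username = admin@internal
        ovirt_password = secret
        ovirt_ca_file = /etc/pki/ovirt-engine/ca.pem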
""" # Get the path of the configuration file, by default use # 'ovirt.ini' file in script directory: default_path = os.path.join( os.path.dirname(os.path.realpath(__file__)), 'ovirt.ini', ) config_path = os.environ.get('OVIRT_INI_PATH', default_path) # Create parser and add ovirt section if it doesn't exist: config = configparser.SafeConfigParser( defaults={ 'ovirt_url': os.environ.get('OVIRT_URL'), 'ovirt_username': os.environ.get('OVIRT_USERNAME'), 'ovirt_password': os.environ.get('OVIRT_PASSWORD'), 'ovirt_ca_file': os.environ.get('OVIRT_CAFILE'), } ) if not config.has_section('ovirt'): config.add_section('ovirt') config.read(config_path) # Create a connection with options defined in ini file: return sdk.Connection( url=config.get('ovirt', 'ovirt_url'), username=config.get('ovirt', 'ovirt_username'), password=config.get('ovirt', 'ovirt_password', raw=True), ca_file=config.get('ovirt', 'ovirt_ca_file'), insecure=config.get('ovirt', 'ovirt_ca_file') is None, ) def get_dict_of_struct(connection, vm): """ Transform SDK Vm Struct type to Python dictionary. """ if vm is None: return dict() vms_service = connection.system_service().vms_service() clusters_service = connection.system_service().clusters_service() vm_service = vms_service.vm_service(vm.id) devices = vm_service.reported_devices_service().list() tags = vm_service.tags_service().list() stats = vm_service.statistics_service().list() labels = vm_service.affinity_labels_service().list() groups = clusters_service.cluster_service( vm.cluster.id ).affinity_groups_service().list() return { 'id': vm.id, 'name': vm.name, 'host': connection.follow_link(vm.host).name if vm.host else None, 'cluster': connection.follow_link(vm.cluster).name, 'status': str(vm.status), 'description': vm.description, 'fqdn': vm.fqdn, 'os_type': vm.os.type, 'template': connection.follow_link(vm.template).name, 'tags': [tag.name for tag in tags], 'affinity_labels': [label.name for label in labels], 'affinity_groups': [ group.name for group in groups if vm.name in [vm.name for vm in connection.follow_link(group.vms)] ], 'statistics': dict( (stat.name, stat.values[0].datum) for stat in stats ), 'devices': dict( (device.name, [ip.address for ip in device.ips]) for device in devices if device.ips ), 'ansible_host': next((device.ips[0].address for device in devices if device.ips), None) } def get_data(connection, vm_name=None): """ Obtain data of `vm_name` if specified, otherwise obtain data of all vms. 
""" vms_service = connection.system_service().vms_service() clusters_service = connection.system_service().clusters_service() if vm_name: vm = vms_service.list(search='name=%s' % vm_name) or [None] data = get_dict_of_struct( connection=connection, vm=vm[0], ) else: vms = dict() data = defaultdict(list) for vm in vms_service.list(): name = vm.name vm_service = vms_service.vm_service(vm.id) cluster_service = clusters_service.cluster_service(vm.cluster.id) # Add vm to vms dict: vms[name] = get_dict_of_struct(connection, vm) # Add vm to cluster group: cluster_name = connection.follow_link(vm.cluster).name data['cluster_%s' % cluster_name].append(name) # Add vm to tag group: tags_service = vm_service.tags_service() for tag in tags_service.list(): data['tag_%s' % tag.name].append(name) # Add vm to status group: data['status_%s' % vm.status].append(name) # Add vm to affinity group: for group in cluster_service.affinity_groups_service().list(): if vm.name in [ v.name for v in connection.follow_link(group.vms) ]: data['affinity_group_%s' % group.name].append(vm.name) # Add vm to affinity label group: affinity_labels_service = vm_service.affinity_labels_service() for label in affinity_labels_service.list(): data['affinity_label_%s' % label.name].append(name) data["_meta"] = { 'hostvars': vms, } return data def main(): args = parse_args() connection = create_connection() print( json.dumps( obj=get_data( connection=connection, vm_name=args.host, ), sort_keys=args.pretty, indent=args.pretty * 2, ) ) if __name__ == '__main__': main() ansible-2.5.1/contrib/inventory/packet_net.ini0000644000000000000000000000343013265756155021424 0ustar rootroot00000000000000# Ansible Packet.net external inventory script settings # [packet] # Packet projects to get info for. Set this to 'all' to get info for all # projects in Packet and merge the results together. Alternatively, set # this to a comma separated list of projects. E.g. 'project-1,project-3,project-4' projects = all projects_exclude = # By default, packet devices in all state are returned. Specify # packet device states to return as a comma-separated list. # device_states = active, inactive, queued, provisioning # items per page to retrieve from packet api at a time items_per_page = 999 # API calls to Packet are costly. For this reason, we cache the results of an API # call. Set this to the path you want cache files to be written to. Two files # will be written to this directory: # - ansible-packet.cache # - ansible-packet.index cache_path = ~/.ansible/tmp # The number of seconds a cache file is considered valid. After this many # seconds, a new API call will be made, and the cache file will be updated. # To disable the cache, set this value to 0 cache_max_age = 300 # Organize groups into a nested/hierarchy instead of a flat namespace. nested_groups = False # Replace - tags when creating groups to avoid issues with ansible replace_dash_in_groups = True # The packet inventory output can become very large. To manage its size, # configure which groups should be created. 
group_by_device_id = True
group_by_hostname = True
group_by_facility = True
group_by_project = True
group_by_operating_system = True
group_by_plan_type = True
group_by_tags = True
group_by_tag_none = True

# If you only want to include hosts that match a certain regular expression
# pattern_include = staging-*

# If you want to exclude any hosts that match a certain regular expression
# pattern_exclude = staging-*
ansible-2.5.1/contrib/inventory/packet_net.py0000755000000000000000000004337013265756155021303 0ustar rootroot00000000000000#!/usr/bin/env python

'''
Packet.net external inventory script
=================================

Generates inventory that Ansible can understand by making API requests to
Packet.net using the Packet library.

NOTE: This script assumes Ansible is being executed where the environment
variable needed for the Packet API token has already been set:

    export PACKET_API_TOKEN=Bfse9F24SFtfs423Gsd3ifGsd43sSdfs

This script also assumes there is a packet_net.ini file alongside it. To
specify a different path to packet_net.ini, define the PACKET_NET_INI_PATH
environment variable:

    export PACKET_NET_INI_PATH=/path/to/my_packet_net.ini
'''

# (c) 2016, Peter Sankauskas
# (c) 2017, Tomas Karasek
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
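#
# Example session (hypothetical values):
#
#   export PACKET_API_TOKEN=Bfse9F24SFtfs423Gsd3ifGsd43sSdfs
#   ./packet_net.py --list             # grouped inventory, cached for
#                                      # cache_max_age from packet_net.ini
#   ./packet_net.py --host 147.75.1.2  # hostvars for the device whose
#                                      # public IPv4 address that is
#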
###################################################################### import sys import os import argparse import re from time import time import six from six.moves import configparser try: import packet except ImportError as e: sys.exit("failed=True msg='`packet-python` library required for this script'") import traceback try: import json except ImportError: import simplejson as json ini_section = 'packet' class PacketInventory(object): def _empty_inventory(self): return {"_meta": {"hostvars": {}}} def __init__(self): ''' Main execution path ''' # Inventory grouped by device IDs, tags, security groups, regions, # and availability zones self.inventory = self._empty_inventory() # Index of hostname (address) to device ID self.index = {} # Read settings and parse CLI arguments self.parse_cli_args() self.read_settings() # Cache if self.args.refresh_cache: self.do_api_calls_update_cache() elif not self.is_cache_valid(): self.do_api_calls_update_cache() # Data to print if self.args.host: data_to_print = self.get_host_info() elif self.args.list: # Display list of devices for inventory if self.inventory == self._empty_inventory(): data_to_print = self.get_inventory_from_cache() else: data_to_print = self.json_format_dict(self.inventory, True) print(data_to_print) def is_cache_valid(self): ''' Determines if the cache files have expired, or if it is still valid ''' if os.path.isfile(self.cache_path_cache): mod_time = os.path.getmtime(self.cache_path_cache) current_time = time() if (mod_time + self.cache_max_age) > current_time: if os.path.isfile(self.cache_path_index): return True return False def read_settings(self): ''' Reads the settings from the packet_net.ini file ''' if six.PY3: config = configparser.ConfigParser() else: config = configparser.SafeConfigParser() _ini_path_raw = os.environ.get('PACKET_NET_INI_PATH') if _ini_path_raw: packet_ini_path = os.path.expanduser(os.path.expandvars(_ini_path_raw)) else: packet_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'packet_net.ini') config.read(packet_ini_path) # items per page self.items_per_page = 999 if config.has_option(ini_section, 'items_per_page'): config.get(ini_section, 'items_per_page') # Instance states to be gathered in inventory. Default is all of them. packet_valid_device_states = [ 'active', 'inactive', 'queued', 'provisioning' ] self.packet_device_states = [] if config.has_option(ini_section, 'device_states'): for device_state in config.get(ini_section, 'device_states').split(','): device_state = device_state.strip() if device_state not in packet_valid_device_states: continue self.packet_device_states.append(device_state) else: self.packet_device_states = packet_valid_device_states # Cache related cache_dir = os.path.expanduser(config.get(ini_section, 'cache_path')) if not os.path.exists(cache_dir): os.makedirs(cache_dir) self.cache_path_cache = cache_dir + "/ansible-packet.cache" self.cache_path_index = cache_dir + "/ansible-packet.index" self.cache_max_age = config.getint(ini_section, 'cache_max_age') # Configure nested groups instead of flat namespace. if config.has_option(ini_section, 'nested_groups'): self.nested_groups = config.getboolean(ini_section, 'nested_groups') else: self.nested_groups = False # Replace dash or not in group names if config.has_option(ini_section, 'replace_dash_in_groups'): self.replace_dash_in_groups = config.getboolean(ini_section, 'replace_dash_in_groups') else: self.replace_dash_in_groups = True # Configure which groups should be created. 
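        # NOTE: config.get()'s return value is discarded in the
        # items_per_page block above, so the hard-coded 999 always wins;
        # self.items_per_page = config.getint(ini_section, 'items_per_page')
        # was presumably intended.

        # Every group_by_* option below becomes an attribute of the same
        # name, defaulting to True when absent from the ini file.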
group_by_options = [ 'group_by_device_id', 'group_by_hostname', 'group_by_facility', 'group_by_project', 'group_by_operating_system', 'group_by_plan_type', 'group_by_tags', 'group_by_tag_none', ] for option in group_by_options: if config.has_option(ini_section, option): setattr(self, option, config.getboolean(ini_section, option)) else: setattr(self, option, True) # Do we need to just include hosts that match a pattern? try: pattern_include = config.get(ini_section, 'pattern_include') if pattern_include and len(pattern_include) > 0: self.pattern_include = re.compile(pattern_include) else: self.pattern_include = None except configparser.NoOptionError: self.pattern_include = None # Do we need to exclude hosts that match a pattern? try: pattern_exclude = config.get(ini_section, 'pattern_exclude') if pattern_exclude and len(pattern_exclude) > 0: self.pattern_exclude = re.compile(pattern_exclude) else: self.pattern_exclude = None except configparser.NoOptionError: self.pattern_exclude = None # Projects self.projects = [] configProjects = config.get(ini_section, 'projects') configProjects_exclude = config.get(ini_section, 'projects_exclude') if (configProjects == 'all'): for projectInfo in self.get_projects(): if projectInfo.name not in configProjects_exclude: self.projects.append(projectInfo.name) else: self.projects = configProjects.split(",") def parse_cli_args(self): ''' Command line argument processing ''' parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Packet') parser.add_argument('--list', action='store_true', default=True, help='List Devices (default: True)') parser.add_argument('--host', action='store', help='Get all the variables about a specific device') parser.add_argument('--refresh-cache', action='store_true', default=False, help='Force refresh of cache by making API requests to Packet (default: False - use cache files)') self.args = parser.parse_args() def do_api_calls_update_cache(self): ''' Do API calls to each region, and save data in cache files ''' for projectInfo in self.get_projects(): if projectInfo.name in self.projects: self.get_devices_by_project(projectInfo) self.write_to_cache(self.inventory, self.cache_path_cache) self.write_to_cache(self.index, self.cache_path_index) def connect(self): ''' create connection to api server''' token = os.environ.get('PACKET_API_TOKEN') if token is None: raise Exception("Error reading token from environment (PACKET_API_TOKEN)!") manager = packet.Manager(auth_token=token) return manager def get_projects(self): '''Makes a Packet API call to get the list of projects''' try: manager = self.connect() projects = manager.list_projects() return projects except Exception as e: traceback.print_exc() self.fail_with_error(e, 'getting Packet projects') def get_devices_by_project(self, project): ''' Makes an Packet API call to the list of devices in a particular project ''' params = { 'per_page': self.items_per_page } try: manager = self.connect() devices = manager.list_devices(project_id=project.id, params=params) for device in devices: self.add_device(device, project) except Exception as e: traceback.print_exc() self.fail_with_error(e, 'getting Packet devices') def fail_with_error(self, err_msg, err_operation=None): '''log an error to std err for ansible-playbook to consume and exit''' if err_operation: err_msg = 'ERROR: "{err_msg}", while: {err_operation}\n'.format( err_msg=err_msg, err_operation=err_operation) sys.stderr.write(err_msg) sys.exit(1) def get_device(self, device_id): manager = self.connect() 
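        # Single-device lookup used by --host; the device ID comes from
        # the cached address index built in add_device().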
device = manager.get_device(device_id) return device def add_device(self, device, project): ''' Adds a device to the inventory and index, as long as it is addressable ''' # Only return devices with desired device states if device.state not in self.packet_device_states: return # Select the best destination address dest = None for ip_address in device.ip_addresses: if ip_address['public'] is True and ip_address['address_family'] == 4: dest = ip_address['address'] if not dest: # Skip devices we cannot address (e.g. private VPC subnet) return # if we only want to include hosts that match a pattern, skip those that don't if self.pattern_include and not self.pattern_include.match(device.hostname): return # if we need to exclude hosts that match a pattern, skip those if self.pattern_exclude and self.pattern_exclude.match(device.hostname): return # Add to index self.index[dest] = [project.id, device.id] # Inventory: Group by device ID (always a group of 1) if self.group_by_device_id: self.inventory[device.id] = [dest] if self.nested_groups: self.push_group(self.inventory, 'devices', device.id) # Inventory: Group by device name (hopefully a group of 1) if self.group_by_hostname: self.push(self.inventory, device.hostname, dest) if self.nested_groups: self.push_group(self.inventory, 'hostnames', project.name) # Inventory: Group by project if self.group_by_project: self.push(self.inventory, project.name, dest) if self.nested_groups: self.push_group(self.inventory, 'projects', project.name) # Inventory: Group by facility if self.group_by_facility: self.push(self.inventory, device.facility['code'], dest) if self.nested_groups: if self.group_by_facility: self.push_group(self.inventory, project.name, device.facility['code']) # Inventory: Group by OS if self.group_by_operating_system: self.push(self.inventory, device.operating_system.slug, dest) if self.nested_groups: self.push_group(self.inventory, 'operating_systems', device.operating_system.slug) # Inventory: Group by plan type if self.group_by_plan_type: self.push(self.inventory, device.plan['slug'], dest) if self.nested_groups: self.push_group(self.inventory, 'plans', device.plan['slug']) # Inventory: Group by tag keys if self.group_by_tags: for k in device.tags: key = self.to_safe("tag_" + k) self.push(self.inventory, key, dest) if self.nested_groups: self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k)) # Global Tag: devices without tags if self.group_by_tag_none and len(device.tags) == 0: self.push(self.inventory, 'tag_none', dest) if self.nested_groups: self.push_group(self.inventory, 'tags', 'tag_none') # Global Tag: tag all Packet devices self.push(self.inventory, 'packet', dest) self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_device(device) def get_host_info_dict_from_device(self, device): device_vars = {} for key in vars(device): value = getattr(device, key) key = self.to_safe('packet_' + key) # Handle complex types if key == 'packet_state': device_vars[key] = device.state or '' elif key == 'packet_hostname': device_vars[key] = value elif isinstance(value, (int, bool)): device_vars[key] = value elif isinstance(value, six.string_types): device_vars[key] = value.strip() elif value is None: device_vars[key] = '' elif key == 'packet_facility': device_vars[key] = value['code'] elif key == 'packet_operating_system': device_vars[key] = value.slug elif key == 'packet_plan': device_vars[key] = value['slug'] elif key == 'packet_tags': for k in value: key = self.to_safe('packet_tag_' + k) device_vars[key] = k else: 
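                # Anything that falls through to here (e.g. nested SDK
                # objects with no packet_* mapping above) is skipped.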
pass # print key # print type(value) # print value return device_vars def get_host_info(self): ''' Get variables about a specific host ''' if len(self.index) == 0: # Need to load index from cache self.load_index_from_cache() if self.args.host not in self.index: # try updating the cache self.do_api_calls_update_cache() if self.args.host not in self.index: # host might not exist anymore return self.json_format_dict({}, True) (project_id, device_id) = self.index[self.args.host] device = self.get_device(device_id) return self.json_format_dict(self.get_host_info_dict_from_device(device), True) def push(self, my_dict, key, element): ''' Push an element onto an array that may not have been defined in the dict ''' group_info = my_dict.setdefault(key, []) if isinstance(group_info, dict): host_list = group_info.setdefault('hosts', []) host_list.append(element) else: group_info.append(element) def push_group(self, my_dict, key, element): ''' Push a group as a child of another group. ''' parent_group = my_dict.setdefault(key, {}) if not isinstance(parent_group, dict): parent_group = my_dict[key] = {'hosts': parent_group} child_groups = parent_group.setdefault('children', []) if element not in child_groups: child_groups.append(element) def get_inventory_from_cache(self): ''' Reads the inventory from the cache file and returns it as a JSON object ''' cache = open(self.cache_path_cache, 'r') json_inventory = cache.read() return json_inventory def load_index_from_cache(self): ''' Reads the index from the cache file sets self.index ''' cache = open(self.cache_path_index, 'r') json_index = cache.read() self.index = json.loads(json_index) def write_to_cache(self, data, filename): ''' Writes data in JSON format to a file ''' json_data = self.json_format_dict(data, True) cache = open(filename, 'w') cache.write(json_data) cache.close() def uncammelize(self, key): temp = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', key) return re.sub('([a-z0-9])([A-Z])', r'\1_\2', temp).lower() def to_safe(self, word): ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups ''' regex = r"[^A-Za-z0-9\_" if not self.replace_dash_in_groups: regex += r"\-" return re.sub(regex + "]", "_", word) def json_format_dict(self, data, pretty=False): ''' Converts a dict to a JSON object and dumps it as a formatted string ''' if pretty: return json.dumps(data, sort_keys=True, indent=2) else: return json.dumps(data) # Run the script PacketInventory() ansible-2.5.1/contrib/inventory/proxmox.py0000755000000000000000000001725713265756155020713 0ustar rootroot00000000000000#!/usr/bin/env python # Copyright (C) 2014 Mathieu GAUTHIER-LAFAYE # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . # Updated 2016 by Matt Harris # # Added support for Proxmox VE 4.x # Added support for using the Notes field of a VM to define groups and variables: # A well-formatted JSON object in the Notes field will be added to the _meta # section for that VM. 
In addition, the "groups" key of this JSON object may be # used to specify group membership: # # { "groups": ["utility", "databases"], "a": false, "b": true } try: import json except ImportError: import simplejson as json import os import sys from optparse import OptionParser from six import iteritems from six.moves.urllib.parse import urlencode from ansible.module_utils.urls import open_url class ProxmoxNodeList(list): def get_names(self): return [node['node'] for node in self] class ProxmoxVM(dict): def get_variables(self): variables = {} for key, value in iteritems(self): variables['proxmox_' + key] = value return variables class ProxmoxVMList(list): def __init__(self, data=None): data = [] if data is None else data for item in data: self.append(ProxmoxVM(item)) def get_names(self): return [vm['name'] for vm in self if vm['template'] != 1] def get_by_name(self, name): results = [vm for vm in self if vm['name'] == name] return results[0] if len(results) > 0 else None def get_variables(self): variables = {} for vm in self: variables[vm['name']] = vm.get_variables() return variables class ProxmoxPoolList(list): def get_names(self): return [pool['poolid'] for pool in self] class ProxmoxPool(dict): def get_members_name(self): return [member['name'] for member in self['members'] if member['template'] != 1] class ProxmoxAPI(object): def __init__(self, options): self.options = options self.credentials = None if not options.url: raise Exception('Missing mandatory parameter --url (or PROXMOX_URL).') elif not options.username: raise Exception('Missing mandatory parameter --username (or PROXMOX_USERNAME).') elif not options.password: raise Exception('Missing mandatory parameter --password (or PROXMOX_PASSWORD).') def auth(self): request_path = '{0}api2/json/access/ticket'.format(self.options.url) request_params = urlencode({ 'username': self.options.username, 'password': self.options.password, }) data = json.load(open_url(request_path, data=request_params)) self.credentials = { 'ticket': data['data']['ticket'], 'CSRFPreventionToken': data['data']['CSRFPreventionToken'], } def get(self, url, data=None): request_path = '{0}{1}'.format(self.options.url, url) headers = {'Cookie': 'PVEAuthCookie={0}'.format(self.credentials['ticket'])} request = open_url(request_path, data=data, headers=headers) response = json.load(request) return response['data'] def nodes(self): return ProxmoxNodeList(self.get('api2/json/nodes')) def vms_by_type(self, node, type): return ProxmoxVMList(self.get('api2/json/nodes/{0}/{1}'.format(node, type))) def vm_description_by_type(self, node, vm, type): return self.get('api2/json/nodes/{0}/{1}/{2}/config'.format(node, type, vm)) def node_qemu(self, node): return self.vms_by_type(node, 'qemu') def node_qemu_description(self, node, vm): return self.vm_description_by_type(node, vm, 'qemu') def node_lxc(self, node): return self.vms_by_type(node, 'lxc') def node_lxc_description(self, node, vm): return self.vm_description_by_type(node, vm, 'lxc') def pools(self): return ProxmoxPoolList(self.get('api2/json/pools')) def pool(self, poolid): return ProxmoxPool(self.get('api2/json/pools/{0}'.format(poolid))) def main_list(options): results = { 'all': { 'hosts': [], }, '_meta': { 'hostvars': {}, } } proxmox_api = ProxmoxAPI(options) proxmox_api.auth() for node in proxmox_api.nodes().get_names(): qemu_list = proxmox_api.node_qemu(node) results['all']['hosts'] += qemu_list.get_names() results['_meta']['hostvars'].update(qemu_list.get_variables()) lxc_list = proxmox_api.node_lxc(node) 
results['all']['hosts'] += lxc_list.get_names() results['_meta']['hostvars'].update(lxc_list.get_variables()) for vm in results['_meta']['hostvars']: vmid = results['_meta']['hostvars'][vm]['proxmox_vmid'] try: type = results['_meta']['hostvars'][vm]['proxmox_type'] except KeyError: type = 'qemu' try: description = proxmox_api.vm_description_by_type(node, vmid, type)['description'] except KeyError: description = None try: metadata = json.loads(description) except TypeError: metadata = {} except ValueError: metadata = { 'notes': description } if 'groups' in metadata: # print metadata for group in metadata['groups']: if group not in results: results[group] = { 'hosts': [] } results[group]['hosts'] += [vm] results['_meta']['hostvars'][vm].update(metadata) # pools for pool in proxmox_api.pools().get_names(): results[pool] = { 'hosts': proxmox_api.pool(pool).get_members_name(), } return results def main_host(options): proxmox_api = ProxmoxAPI(options) proxmox_api.auth() for node in proxmox_api.nodes().get_names(): qemu_list = proxmox_api.node_qemu(node) qemu = qemu_list.get_by_name(options.host) if qemu: return qemu.get_variables() return {} def main(): parser = OptionParser(usage='%prog [options] --list | --host HOSTNAME') parser.add_option('--list', action="store_true", default=False, dest="list") parser.add_option('--host', dest="host") parser.add_option('--url', default=os.environ.get('PROXMOX_URL'), dest='url') parser.add_option('--username', default=os.environ.get('PROXMOX_USERNAME'), dest='username') parser.add_option('--password', default=os.environ.get('PROXMOX_PASSWORD'), dest='password') parser.add_option('--pretty', action="store_true", default=False, dest='pretty') (options, args) = parser.parse_args() if options.list: data = main_list(options) elif options.host: data = main_host(options) else: parser.print_help() sys.exit(1) indent = None if options.pretty: indent = 2 print(json.dumps(data, indent=indent)) if __name__ == '__main__': main() ansible-2.5.1/contrib/inventory/rackhd.py0000755000000000000000000000570613265756155020427 0ustar rootroot00000000000000#!/usr/bin/env python # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
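#
# Usage sketch (hypothetical endpoint and node IDs): RACKHD_URL in the
# environment overrides the default endpoint, and node IDs come from
# RackHD itself.
#
#   export RACKHD_URL=10.1.1.45:8080   # becomes http://10.1.1.45:8080
#   ./rackhd.py --list                 # every node of type 'compute'
#   ./rackhd.py --host id1,id2         # specific node catalogs
#
# Note that failures are swallowed by the bare except clauses below, so a
# bad URL tends to yield empty output rather than a traceback.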
import json import os import requests import argparse RACKHD_URL = 'http://localhost:8080' class RackhdInventory(object): def __init__(self, nodeids): self._inventory = {} for nodeid in nodeids: self._load_inventory_data(nodeid) inventory = {} for (nodeid, info) in self._inventory.items(): inventory[nodeid] = (self._format_output(nodeid, info)) print(json.dumps(inventory)) def _load_inventory_data(self, nodeid): info = {} info['ohai'] = RACKHD_URL + '/api/common/nodes/{0}/catalogs/ohai'.format(nodeid) info['lookup'] = RACKHD_URL + '/api/common/lookups/?q={0}'.format(nodeid) results = {} for (key, url) in info.items(): r = requests.get(url, verify=False) results[key] = r.text self._inventory[nodeid] = results def _format_output(self, nodeid, info): try: node_info = json.loads(info['lookup']) ipaddress = '' if len(node_info) > 0: ipaddress = node_info[0]['ipAddress'] output = {'hosts': [ipaddress], 'vars': {}} for (key, result) in info.items(): output['vars'][key] = json.loads(result) output['vars']['ansible_ssh_user'] = 'monorail' except KeyError: pass return output def parse_args(): parser = argparse.ArgumentParser() parser.add_argument('--host') parser.add_argument('--list', action='store_true') return parser.parse_args() try: # check if rackhd url(ie:10.1.1.45:8080) is specified in the environment RACKHD_URL = 'http://' + str(os.environ['RACKHD_URL']) except: # use default values pass # Use the nodeid specified in the environment to limit the data returned # or return data for all available nodes nodeids = [] if (parse_args().host): try: nodeids += parse_args().host.split(',') RackhdInventory(nodeids) except: pass if (parse_args().list): try: url = RACKHD_URL + '/api/common/nodes' r = requests.get(url, verify=False) data = json.loads(r.text) for entry in data: if entry['type'] == 'compute': nodeids.append(entry['id']) RackhdInventory(nodeids) except: pass ansible-2.5.1/contrib/inventory/rax.ini0000644000000000000000000000435513265756155020110 0ustar rootroot00000000000000# Ansible Rackspace external inventory script settings # [rax] # Environment Variable: RAX_CREDS_FILE # # An optional configuration that points to a pyrax-compatible credentials # file. # # If not supplied, rax.py will look for a credentials file # at ~/.rackspace_cloud_credentials. It uses the Rackspace Python SDK, # and therefore requires a file formatted per the SDK's specifications. # # https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md # creds_file = ~/.rackspace_cloud_credentials # Environment Variable: RAX_REGION # # An optional environment variable to narrow inventory search # scope. If used, needs a value like ORD, DFW, SYD (a Rackspace # datacenter) and optionally accepts a comma-separated list. # regions = IAD,ORD,DFW # Environment Variable: RAX_ENV # # A configuration that will use an environment as configured in # ~/.pyrax.cfg, see # https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md # env = prod # Environment Variable: RAX_META_PREFIX # Default: meta # # A configuration that changes the prefix used for meta key/value groups. # For compatibility with ec2.py set to "tag" # meta_prefix = meta # Environment Variable: RAX_ACCESS_NETWORK # Default: public # # A configuration that will tell the inventory script to use a specific # server network to determine the ansible_ssh_host value. If no address # is found, ansible_ssh_host will not be set. Accepts a comma-separated # list of network names, the first found wins. 
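# e.g. access_network = private,public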
# access_network = public # Environment Variable: RAX_ACCESS_IP_VERSION # Default: 4 # # A configuration related to "access_network" that will attempt to # determine the ansible_ssh_host value for either IPv4 or IPv6. If no # address is found, ansible_ssh_host will not be set. # Acceptable values are: 4 or 6. Values other than 4 or 6 # will be ignored, and 4 will be used. Accepts a comma separated list, # the first found wins. # access_ip_version = 4 # Environment Variable: RAX_CACHE_MAX_AGE # Default: 600 # # A configuration the changes the behavior or the inventory cache. # Inventory listing performed before this value will be returned from # the cache instead of making a full request for all inventory. Setting # this value to 0 will force a full request. # cache_max_age = 600ansible-2.5.1/contrib/inventory/rax.py0000755000000000000000000004076013265756155017764 0ustar rootroot00000000000000#!/usr/bin/env python # (c) 2013, Jesse Keating , # Matt Martz # # This file is part of Ansible. # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . """ Rackspace Cloud Inventory Authors: Jesse Keating , Matt Martz Description: Generates inventory that Ansible can understand by making API request to Rackspace Public Cloud API When run against a specific host, this script returns variables similar to: rax_os-ext-sts_task_state rax_addresses rax_links rax_image rax_os-ext-sts_vm_state rax_flavor rax_id rax_rax-bandwidth_bandwidth rax_user_id rax_os-dcf_diskconfig rax_accessipv4 rax_accessipv6 rax_progress rax_os-ext-sts_power_state rax_metadata rax_status rax_updated rax_hostid rax_name rax_created rax_tenant_id rax_loaded Configuration: rax.py can be configured using a rax.ini file or via environment variables. The rax.ini file should live in the same directory along side this script. The section header for configuration values related to this inventory plugin is [rax] [rax] creds_file = ~/.rackspace_cloud_credentials regions = IAD,ORD,DFW env = prod meta_prefix = meta access_network = public access_ip_version = 4 Each of these configurations also has a corresponding environment variable. An environment variable will override a configuration file value. creds_file: Environment Variable: RAX_CREDS_FILE An optional configuration that points to a pyrax-compatible credentials file. If not supplied, rax.py will look for a credentials file at ~/.rackspace_cloud_credentials. It uses the Rackspace Python SDK, and therefore requires a file formatted per the SDK's specifications. https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md regions: Environment Variable: RAX_REGION An optional environment variable to narrow inventory search scope. If used, needs a value like ORD, DFW, SYD (a Rackspace datacenter) and optionally accepts a comma-separated list. 
environment: Environment Variable: RAX_ENV A configuration that will use an environment as configured in ~/.pyrax.cfg, see https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md meta_prefix: Environment Variable: RAX_META_PREFIX Default: meta A configuration that changes the prefix used for meta key/value groups. For compatibility with ec2.py set to "tag" access_network: Environment Variable: RAX_ACCESS_NETWORK Default: public A configuration that will tell the inventory script to use a specific server network to determine the ansible_ssh_host value. If no address is found, ansible_ssh_host will not be set. Accepts a comma-separated list of network names, the first found wins. access_ip_version: Environment Variable: RAX_ACCESS_IP_VERSION Default: 4 A configuration related to "access_network" that will attempt to determine the ansible_ssh_host value for either IPv4 or IPv6. If no address is found, ansible_ssh_host will not be set. Acceptable values are: 4 or 6. Values other than 4 or 6 will be ignored, and 4 will be used. Accepts a comma-separated list, the first found wins. Examples: List server instances $ RAX_CREDS_FILE=~/.raxpub rax.py --list List servers in ORD datacenter only $ RAX_CREDS_FILE=~/.raxpub RAX_REGION=ORD rax.py --list List servers in ORD and DFW datacenters $ RAX_CREDS_FILE=~/.raxpub RAX_REGION=ORD,DFW rax.py --list Get server details for server named "server.example.com" $ RAX_CREDS_FILE=~/.raxpub rax.py --host server.example.com Use the instance private IP to connect (instead of public IP) $ RAX_CREDS_FILE=~/.raxpub RAX_ACCESS_NETWORK=private rax.py --list """ import os import re import sys import argparse import warnings import collections import ConfigParser from six import iteritems try: import json except ImportError: import simplejson as json try: import pyrax from pyrax.utils import slugify except ImportError: sys.exit('pyrax is required for this module') from time import time from ansible.constants import get_config from ansible.module_utils.parsing.convert_bool import boolean from ansible.module_utils.six import text_type NON_CALLABLES = (text_type, str, bool, dict, int, list, type(None)) def load_config_file(): p = ConfigParser.ConfigParser() config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'rax.ini') try: p.read(config_file) except ConfigParser.Error: return None else: return p p = load_config_file() def rax_slugify(value): return 'rax_%s' % (re.sub(r'[^\w-]', '_', value).lower().lstrip('_')) def to_dict(obj): instance = {} for key in dir(obj): value = getattr(obj, key) if isinstance(value, NON_CALLABLES) and not key.startswith('_'): key = rax_slugify(key) instance[key] = value return instance def host(regions, hostname): hostvars = {} for region in regions: # Connect to the region cs = pyrax.connect_to_cloudservers(region=region) for server in cs.servers.list(): if server.name == hostname: for key, value in to_dict(server).items(): hostvars[key] = value # And finally, add an IP address hostvars['ansible_ssh_host'] = server.accessIPv4 print(json.dumps(hostvars, sort_keys=True, indent=4)) def _list_into_cache(regions): groups = collections.defaultdict(list) hostvars = collections.defaultdict(dict) images = {} cbs_attachments = collections.defaultdict(dict) prefix = get_config(p, 'rax', 'meta_prefix', 'RAX_META_PREFIX', 'meta') try: # Ansible 2.3+ networks = get_config(p, 'rax', 'access_network', 'RAX_ACCESS_NETWORK', 'public', value_type='list') except TypeError: # Ansible 2.2.x and below # pylint: 
disable=unexpected-keyword-arg networks = get_config(p, 'rax', 'access_network', 'RAX_ACCESS_NETWORK', 'public', islist=True) try: try: # Ansible 2.3+ ip_versions = map(int, get_config(p, 'rax', 'access_ip_version', 'RAX_ACCESS_IP_VERSION', 4, value_type='list')) except TypeError: # Ansible 2.2.x and below # pylint: disable=unexpected-keyword-arg ip_versions = map(int, get_config(p, 'rax', 'access_ip_version', 'RAX_ACCESS_IP_VERSION', 4, islist=True)) except: ip_versions = [4] else: ip_versions = [v for v in ip_versions if v in [4, 6]] if not ip_versions: ip_versions = [4] # Go through all the regions looking for servers for region in regions: # Connect to the region cs = pyrax.connect_to_cloudservers(region=region) if cs is None: warnings.warn( 'Connecting to Rackspace region "%s" has caused Pyrax to ' 'return None. Is this a valid region?' % region, RuntimeWarning) continue for server in cs.servers.list(): # Create a group on region groups[region].append(server.name) # Check if group metadata key in servers' metadata group = server.metadata.get('group') if group: groups[group].append(server.name) for extra_group in server.metadata.get('groups', '').split(','): if extra_group: groups[extra_group].append(server.name) # Add host metadata for key, value in to_dict(server).items(): hostvars[server.name][key] = value hostvars[server.name]['rax_region'] = region for key, value in iteritems(server.metadata): groups['%s_%s_%s' % (prefix, key, value)].append(server.name) groups['instance-%s' % server.id].append(server.name) groups['flavor-%s' % server.flavor['id']].append(server.name) # Handle boot from volume if not server.image: if not cbs_attachments[region]: cbs = pyrax.connect_to_cloud_blockstorage(region) for vol in cbs.list(): if boolean(vol.bootable, strict=False): for attachment in vol.attachments: metadata = vol.volume_image_metadata server_id = attachment['server_id'] cbs_attachments[region][server_id] = { 'id': metadata['image_id'], 'name': slugify(metadata['image_name']) } image = cbs_attachments[region].get(server.id) if image: server.image = {'id': image['id']} hostvars[server.name]['rax_image'] = server.image hostvars[server.name]['rax_boot_source'] = 'volume' images[image['id']] = image['name'] else: hostvars[server.name]['rax_boot_source'] = 'local' try: imagegroup = 'image-%s' % images[server.image['id']] groups[imagegroup].append(server.name) groups['image-%s' % server.image['id']].append(server.name) except KeyError: try: image = cs.images.get(server.image['id']) except cs.exceptions.NotFound: groups['image-%s' % server.image['id']].append(server.name) else: images[image.id] = image.human_id groups['image-%s' % image.human_id].append(server.name) groups['image-%s' % server.image['id']].append(server.name) # And finally, add an IP address ansible_ssh_host = None # use accessIPv[46] instead of looping address for 'public' for network_name in networks: if ansible_ssh_host: break if network_name == 'public': for version_name in ip_versions: if ansible_ssh_host: break if version_name == 6 and server.accessIPv6: ansible_ssh_host = server.accessIPv6 elif server.accessIPv4: ansible_ssh_host = server.accessIPv4 if not ansible_ssh_host: addresses = server.addresses.get(network_name, []) for address in addresses: for version_name in ip_versions: if ansible_ssh_host: break if address.get('version') == version_name: ansible_ssh_host = address.get('addr') break if ansible_ssh_host: hostvars[server.name]['ansible_ssh_host'] = ansible_ssh_host if hostvars: groups['_meta'] = {'hostvars': 
hostvars} with open(get_cache_file_path(regions), 'w') as cache_file: json.dump(groups, cache_file) def get_cache_file_path(regions): regions_str = '.'.join([reg.strip().lower() for reg in regions]) ansible_tmp_path = os.path.join(os.path.expanduser("~"), '.ansible', 'tmp') if not os.path.exists(ansible_tmp_path): os.makedirs(ansible_tmp_path) return os.path.join(ansible_tmp_path, 'ansible-rax-%s-%s.cache' % ( pyrax.identity.username, regions_str)) def _list(regions, refresh_cache=True): cache_max_age = int(get_config(p, 'rax', 'cache_max_age', 'RAX_CACHE_MAX_AGE', 600)) if (not os.path.exists(get_cache_file_path(regions)) or refresh_cache or (time() - os.stat(get_cache_file_path(regions))[-1]) > cache_max_age): # Cache file doesn't exist or older than 10m or refresh cache requested _list_into_cache(regions) with open(get_cache_file_path(regions), 'r') as cache_file: groups = json.load(cache_file) print(json.dumps(groups, sort_keys=True, indent=4)) def parse_args(): parser = argparse.ArgumentParser(description='Ansible Rackspace Cloud ' 'inventory module') group = parser.add_mutually_exclusive_group(required=True) group.add_argument('--list', action='store_true', help='List active servers') group.add_argument('--host', help='List details about the specific host') parser.add_argument('--refresh-cache', action='store_true', default=False, help=('Force refresh of cache, making API requests to' 'RackSpace (default: False - use cache files)')) return parser.parse_args() def setup(): default_creds_file = os.path.expanduser('~/.rackspace_cloud_credentials') env = get_config(p, 'rax', 'environment', 'RAX_ENV', None) if env: pyrax.set_environment(env) keyring_username = pyrax.get_setting('keyring_username') # Attempt to grab credentials from environment first creds_file = get_config(p, 'rax', 'creds_file', 'RAX_CREDS_FILE', None) if creds_file is not None: creds_file = os.path.expanduser(creds_file) else: # But if that fails, use the default location of # ~/.rackspace_cloud_credentials if os.path.isfile(default_creds_file): creds_file = default_creds_file elif not keyring_username: sys.exit('No value in environment variable %s and/or no ' 'credentials file at %s' % ('RAX_CREDS_FILE', default_creds_file)) identity_type = pyrax.get_setting('identity_type') pyrax.set_setting('identity_type', identity_type or 'rackspace') region = pyrax.get_setting('region') try: if keyring_username: pyrax.keyring_auth(keyring_username, region=region) else: pyrax.set_credential_file(creds_file, region=region) except Exception as e: sys.exit("%s: %s" % (e, e.message)) regions = [] if region: regions.append(region) else: try: # Ansible 2.3+ region_list = get_config(p, 'rax', 'regions', 'RAX_REGION', 'all', value_type='list') except TypeError: # Ansible 2.2.x and below # pylint: disable=unexpected-keyword-arg region_list = get_config(p, 'rax', 'regions', 'RAX_REGION', 'all', islist=True) for region in region_list: region = region.strip().upper() if region == 'ALL': regions = pyrax.regions break elif region not in pyrax.regions: sys.exit('Unsupported region %s' % region) elif region not in regions: regions.append(region) return regions def main(): args = parse_args() regions = setup() if args.list: _list(regions, refresh_cache=args.refresh_cache) elif args.host: host(regions, args.host) sys.exit(0) if __name__ == '__main__': main() ansible-2.5.1/contrib/inventory/rhv.py0000777000000000000000000000000013265756155021555 2ovirt4.pyustar 
rootroot00000000000000ansible-2.5.1/contrib/inventory/rudder.ini0000644000000000000000000000200413265756155020570 0ustar rootroot00000000000000# Rudder external inventory script settings # [rudder] # Your Rudder server API URL, typically: # https://rudder.local/rudder/api uri = https://rudder.local/rudder/api # By default, Rudder uses a self-signed certificate. Set this to True # to disable certificate validation. disable_ssl_certificate_validation = True # Your Rudder API token, created in the Web interface. token = aaabbbccc # Rudder API version to use, use "latest" for latest available # version. version = latest # Property to use as group name in the output. # Can generally be "id" or "displayName". group_name = displayName # Fail if there are two groups with the same name or two hosts with the # same hostname in the output. fail_if_name_collision = True # We cache the results of Rudder API in a local file cache_path = /tmp/ansible-rudder.cache # The number of seconds a cache file is considered valid. After this many # seconds, a new API call will be made, and the cache file will be updated. # Set to 0 to disable cache. cache_max_age = 500 ansible-2.5.1/contrib/inventory/rudder.py0000755000000000000000000002464713265756155020465 0ustar rootroot00000000000000#!/usr/bin/env python # Copyright (c) 2015, Normation SAS # # Inspired by the EC2 inventory plugin: # https://github.com/ansible/ansible/blob/devel/contrib/inventory/ec2.py # # This file is part of Ansible, # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . ###################################################################### ''' Rudder external inventory script ================================= Generates inventory that Ansible can understand by making API request to a Rudder server. This script is compatible with Rudder 2.10 or later. The output JSON includes all your Rudder groups, containing the hostnames of their nodes. Groups and nodes have a variable called rudder_group_id and rudder_node_id, which is the Rudder internal id of the item, allowing to identify them uniquely. Hosts variables also include your node properties, which are key => value properties set by the API and specific to each node. This script assumes there is an rudder.ini file alongside it. 
To specify a different path to rudder.ini, define the RUDDER_INI_PATH environment variable: export RUDDER_INI_PATH=/path/to/my_rudder.ini You have to configure your Rudder server information, either in rudder.ini or by overriding it with environment variables: export RUDDER_API_VERSION='latest' export RUDDER_API_TOKEN='my_token' export RUDDER_API_URI='https://rudder.local/rudder/api' ''' import sys import os import re import argparse import six import httplib2 as http from time import time from ansible.module_utils.six.moves import configparser from ansible.module_utils.six.moves.urllib.parse import urlparse try: import json except ImportError: import simplejson as json class RudderInventory(object): def __init__(self): ''' Main execution path ''' # Empty inventory by default self.inventory = {} # Read settings and parse CLI arguments self.read_settings() self.parse_cli_args() # Create connection self.conn = http.Http(disable_ssl_certificate_validation=self.disable_ssl_validation) # Cache if self.args.refresh_cache: self.update_cache() elif not self.is_cache_valid(): self.update_cache() else: self.load_cache() data_to_print = {} if self.args.host: data_to_print = self.get_host_info(self.args.host) elif self.args.list: data_to_print = self.get_list_info() print(self.json_format_dict(data_to_print, True)) def read_settings(self): ''' Reads the settings from the rudder.ini file ''' if six.PY2: config = configparser.SafeConfigParser() else: config = configparser.ConfigParser() rudder_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'rudder.ini') rudder_ini_path = os.path.expanduser(os.path.expandvars(os.environ.get('RUDDER_INI_PATH', rudder_default_ini_path))) config.read(rudder_ini_path) self.token = os.environ.get('RUDDER_API_TOKEN', config.get('rudder', 'token')) self.version = os.environ.get('RUDDER_API_VERSION', config.get('rudder', 'version')) self.uri = os.environ.get('RUDDER_API_URI', config.get('rudder', 'uri')) self.disable_ssl_validation = config.getboolean('rudder', 'disable_ssl_certificate_validation') self.group_name = config.get('rudder', 'group_name') self.fail_if_name_collision = config.getboolean('rudder', 'fail_if_name_collision') self.cache_path = config.get('rudder', 'cache_path') self.cache_max_age = config.getint('rudder', 'cache_max_age') def parse_cli_args(self): ''' Command line argument processing ''' parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Rudder inventory') parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)') parser.add_argument('--host', action='store', help='Get all the variables about a specific instance') parser.add_argument('--refresh-cache', action='store_true', default=False, help='Force refresh of cache by making API requests to Rudder (default: False - use cache files)') self.args = parser.parse_args() def is_cache_valid(self): ''' Determines if the cache files have expired, or if it is still valid ''' if os.path.isfile(self.cache_path): mod_time = os.path.getmtime(self.cache_path) current_time = time() if (mod_time + self.cache_max_age) > current_time: return True return False def load_cache(self): ''' Reads the cache from the cache file sets self.cache ''' cache = open(self.cache_path, 'r') json_cache = cache.read() try: self.inventory = json.loads(json_cache) except ValueError as e: self.fail_with_error('Could not parse JSON response from local cache', 'parsing local cache') def write_cache(self): ''' Writes data in JSON format 
to a file ''' json_data = self.json_format_dict(self.inventory, True) cache = open(self.cache_path, 'w') cache.write(json_data) cache.close() def get_nodes(self): ''' Gets the nodes list from Rudder ''' path = '/nodes?select=nodeAndPolicyServer' result = self.api_call(path) nodes = {} for node in result['data']['nodes']: nodes[node['id']] = {} nodes[node['id']]['hostname'] = node['hostname'] if 'properties' in node: nodes[node['id']]['properties'] = node['properties'] else: nodes[node['id']]['properties'] = [] return nodes def get_groups(self): ''' Gets the groups list from Rudder ''' path = '/groups' result = self.api_call(path) groups = {} for group in result['data']['groups']: groups[group['id']] = {'hosts': group['nodeIds'], 'name': self.to_safe(group[self.group_name])} return groups def update_cache(self): ''' Fetches the inventory information from Rudder and creates the inventory ''' nodes = self.get_nodes() groups = self.get_groups() inventory = {} for group in groups: # Check for name collision if self.fail_if_name_collision: if groups[group]['name'] in inventory: self.fail_with_error('Name collision on groups: "%s" appears twice' % groups[group]['name'], 'creating groups') # Add group to inventory inventory[groups[group]['name']] = {} inventory[groups[group]['name']]['hosts'] = [] inventory[groups[group]['name']]['vars'] = {} inventory[groups[group]['name']]['vars']['rudder_group_id'] = group for node in groups[group]['hosts']: # Add node to group inventory[groups[group]['name']]['hosts'].append(nodes[node]['hostname']) properties = {} for node in nodes: # Check for name collision if self.fail_if_name_collision: if nodes[node]['hostname'] in properties: self.fail_with_error('Name collision on hosts: "%s" appears twice' % nodes[node]['hostname'], 'creating hosts') # Add node properties to inventory properties[nodes[node]['hostname']] = {} properties[nodes[node]['hostname']]['rudder_node_id'] = node for node_property in nodes[node]['properties']: properties[nodes[node]['hostname']][self.to_safe(node_property['name'])] = node_property['value'] inventory['_meta'] = {} inventory['_meta']['hostvars'] = properties self.inventory = inventory if self.cache_max_age > 0: self.write_cache() def get_list_info(self): ''' Gets inventory information from local cache ''' return self.inventory def get_host_info(self, hostname): ''' Gets information about a specific host from local cache ''' if hostname in self.inventory['_meta']['hostvars']: return self.inventory['_meta']['hostvars'][hostname] else: return {} def api_call(self, path): ''' Performs an API request ''' headers = { 'X-API-Token': self.token, 'X-API-Version': self.version, 'Content-Type': 'application/json;charset=utf-8' } target = urlparse(self.uri + path) method = 'GET' body = '' try: response, content = self.conn.request(target.geturl(), method, body, headers) except: self.fail_with_error('Error connecting to Rudder server') try: data = json.loads(content) except ValueError as e: self.fail_with_error('Could not parse JSON response from Rudder API', 'reading API response') return data def fail_with_error(self, err_msg, err_operation=None): ''' Logs an error to std err for ansible-playbook to consume and exit ''' if err_operation: err_msg = 'ERROR: "{err_msg}", while: {err_operation}'.format( err_msg=err_msg, err_operation=err_operation) sys.stderr.write(err_msg) sys.exit(1) def json_format_dict(self, data, pretty=False): ''' Converts a dict to a JSON object and dumps it as a formatted string ''' if pretty: return json.dumps(data, 
sort_keys=True, indent=2) else: return json.dumps(data) def to_safe(self, word): ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible variable names ''' return re.sub(r'[^A-Za-z0-9\_]', '_', word) # Run the script RudderInventory() ansible-2.5.1/contrib/inventory/scaleway.ini0000644000000000000000000000177213265756155021126 0ustar rootroot00000000000000# Ansible dynamic inventory script for Scaleway cloud provider # [compute] # Fetch inventory for regions. If not defined will read the SCALEWAY_REGION environment variable # # regions = all # regions = ams1 # regions = par1, ams1 regions = par1 # Define a Scaleway token to perform required queries on the API # in order to generate inventory output. # [auth] # Token to authenticate with Scaleway's API. # If not defined will read the SCALEWAY_TOKEN environment variable # api_token = mysecrettoken # To avoid performing excessive calls to Scaleway API you can define a # cache for the plugin output. Within the time defined in seconds, latest # output will be reused. After that time, the cache will be refreshed. # [cache] cache_max_age = 60 cache_dir = '~/.ansible/tmp' [defaults] # You may want to use only public IP addresses or private IP addresses. # You can set public_ip_only configuration to get public IPs only. # If not defined defaults to retrieving private IP addresses. # public_ip_only = false ansible-2.5.1/contrib/inventory/scaleway.py0000755000000000000000000001603413265756155020777 0ustar rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- ''' External inventory script for Scaleway ==================================== Shamelessly copied from an existing inventory script. This script generates an inventory that Ansible can understand by making API requests to Scaleway API Requires some python libraries, ensure to have them installed when using this script. (pip install requests https://pypi.python.org/pypi/requests) Before using this script you may want to modify scaleway.ini config file. This script generates an Ansible hosts file with these host groups: : Defines host itself with Scaleway's hostname as group name. : Contains all hosts which has "" as tag. : Contains all hosts which are in the "" region. all: Contains all hosts defined in Scaleway. ''' # (c) 2017, Paul B. # # This file is part of Ansible, # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
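# A sketch of the JSON this script emits with --list, assuming a single
# server "myhost" in region par1 (names and address are illustrative):
#
#   {
#       "all": {"children": ["myhost"], "hosts": []},
#       "par1": {"children": ["myhost"], "hosts": []},
#       "myhost": ["10.1.2.3"]
#   }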
import copy import os import requests import six from six.moves import configparser import sys import time import traceback try: import json except ImportError: import simplejson as json EMPTY_GROUP = { 'children': [], 'hosts': [] } class ScalewayAPI: REGIONS = ['par1', 'ams1'] def __init__(self, auth_token, region): self.session = requests.session() self.session.headers.update({ 'User-Agent': 'Ansible Python/%s' % (sys.version.split(' ')[0]) }) self.session.headers.update({ 'X-Auth-Token': auth_token.encode('latin1') }) self.base_url = 'https://cp-%s.scaleway.com' % (region) def servers(self): raw = self.session.get('/'.join([self.base_url, 'servers'])) try: response = raw.json() return self.get_resource('servers', response, raw) except ValueError: return [] def get_resource(self, resource, response, raw): raw.raise_for_status() if resource in response: return response[resource] else: raise ValueError( "Resource %s not found in Scaleway API response" % (resource)) def env_or_param(env_key, param=None, fallback=None): env_value = os.environ.get(env_key) if (param, env_value) == (None, None): return fallback elif env_value is not None: return env_value else: return param def save_cache(data, config): ''' saves item to cache ''' dpath = config.get('cache', 'cache_dir') try: cache = open('/'.join([dpath, 'scaleway_ansible_inventory.json']), 'w') cache.write(json.dumps(data)) cache.close() except IOError as e: pass # not really sure what to do here def get_cache(cache_item, config): ''' returns cached item ''' dpath = config.get('cache', 'cache_dir') inv = {} try: cache = open('/'.join([dpath, 'scaleway_ansible_inventory.json']), 'r') inv = cache.read() cache.close() except IOError as e: pass # not really sure what to do here return inv def cache_available(config): ''' checks if we have a 'fresh' cache available for item requested ''' if config.has_option('cache', 'cache_dir'): dpath = config.get('cache', 'cache_dir') try: existing = os.stat( '/'.join([dpath, 'scaleway_ansible_inventory.json'])) except OSError: return False if config.has_option('cache', 'cache_max_age'): maxage = config.get('cache', 'cache_max_age') else: maxage = 60 if (int(time.time()) - int(existing.st_mtime)) <= int(maxage): return True return False def generate_inv_from_api(config): try: inventory['all'] = copy.deepcopy(EMPTY_GROUP) if config.has_option('auth', 'api_token'): auth_token = config.get('auth', 'api_token') auth_token = env_or_param('SCALEWAY_TOKEN', param=auth_token) if auth_token is None: sys.stderr.write('ERROR: missing authentication token for Scaleway API') sys.exit(1) if config.has_option('compute', 'regions'): regions = config.get('compute', 'regions') if regions == 'all': regions = ScalewayAPI.REGIONS else: regions = map(str.strip, regions.split(',')) else: regions = [ env_or_param('SCALEWAY_REGION', fallback='par1') ] for region in regions: api = ScalewayAPI(auth_token, region) for server in api.servers(): hostname = server['hostname'] if config.has_option('defaults', 'public_ip_only') and config.getboolean('defaults', 'public_ip_only'): ip = server['public_ip']['address'] else: ip = server['private_ip'] for server_tag in server['tags']: if server_tag not in inventory: inventory[server_tag] = copy.deepcopy(EMPTY_GROUP) inventory[server_tag]['children'].append(hostname) if region not in inventory: inventory[region] = copy.deepcopy(EMPTY_GROUP) inventory[region]['children'].append(hostname) inventory['all']['children'].append(hostname) inventory[hostname] = [] inventory[hostname].append(ip) return 
inventory except Exception: # Return empty hosts output traceback.print_exc() return {'all': {'hosts': []}, '_meta': {'hostvars': {}}} def get_inventory(config): ''' Reads the inventory from cache or Scaleway api ''' if cache_available(config): inv = get_cache('scaleway_ansible_inventory.json', config) else: inv = generate_inv_from_api(config) save_cache(inv, config) return json.dumps(inv) if __name__ == '__main__': inventory = {} # Read config if six.PY3: config = configparser.ConfigParser() else: config = configparser.SafeConfigParser() for configfilename in [os.path.abspath(sys.argv[0]).rsplit('.py')[0] + '.ini', 'scaleway.ini']: if os.path.exists(configfilename): config.read(configfilename) break if cache_available(config): inventory = get_cache('scaleway_ansible_inventory.json', config) else: inventory = get_inventory(config) # return to ansible sys.stdout.write(str(inventory)) sys.stdout.flush() ansible-2.5.1/contrib/inventory/serf.py0000755000000000000000000000573013265756155020127 0ustar rootroot00000000000000#!/usr/bin/env python # (c) 2015, Marc Abramowitz # # This file is part of Ansible. # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . # Dynamic inventory script which lets you use nodes discovered by Serf # (https://serfdom.io/). 
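# # A sketch of the --list output for a node "node1" tagged role=web
# (illustrative); note that groups are keyed by tag *values*:
#
#   {"serf": ["node1"],
#    "web": ["node1"],
#    "_meta": {"hostvars": {"node1": {"role": "web"}}}}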
# # Requires the `serfclient` Python module from # https://pypi.python.org/pypi/serfclient # # Environment variables # --------------------- # - `SERF_RPC_ADDR` # - `SERF_RPC_AUTH` # # These variables are described at https://www.serfdom.io/docs/commands/members.html#_rpc_addr import argparse import collections import os import sys # https://pypi.python.org/pypi/serfclient from serfclient import SerfClient, EnvironmentConfig try: import json except ImportError: import simplejson as json _key = 'serf' def _serf_client(): env = EnvironmentConfig() return SerfClient(host=env.host, port=env.port, rpc_auth=env.auth_key) def get_serf_members_data(): return _serf_client().members().body['Members'] def get_nodes(data): return [node['Name'] for node in data] def get_groups(data): groups = collections.defaultdict(list) for node in data: for key, value in node['Tags'].items(): groups[value].append(node['Name']) return groups def get_meta(data): meta = {'hostvars': {}} for node in data: meta['hostvars'][node['Name']] = node['Tags'] return meta def print_list(): data = get_serf_members_data() nodes = get_nodes(data) groups = get_groups(data) meta = get_meta(data) inventory_data = {_key: nodes, '_meta': meta} inventory_data.update(groups) print(json.dumps(inventory_data)) def print_host(host): data = get_serf_members_data() meta = get_meta(data) print(json.dumps(meta['hostvars'][host])) def get_args(args_list): parser = argparse.ArgumentParser( description='ansible inventory script reading from serf cluster') mutex_group = parser.add_mutually_exclusive_group(required=True) help_list = 'list all hosts from serf cluster' mutex_group.add_argument('--list', action='store_true', help=help_list) help_host = 'display variables for a host' mutex_group.add_argument('--host', help=help_host) return parser.parse_args(args_list) def main(args_list): args = get_args(args_list) if args.list: print_list() if args.host: print_host(args.host) if __name__ == '__main__': main(sys.argv[1:]) ansible-2.5.1/contrib/inventory/softlayer.py0000755000000000000000000001600413265756155021174 0ustar rootroot00000000000000#!/usr/bin/env python """ SoftLayer external inventory script. The SoftLayer Python API client is required. Use `pip install softlayer` to install it. You have a few different options for configuring your username and api_key. You can pass environment variables (SL_USERNAME and SL_API_KEY). You can also write INI file to ~/.softlayer or /etc/softlayer.conf. For more information see the SL API at: - https://softlayer-python.readthedocs.org/en/latest/config_file.html The SoftLayer Python client has a built in command for saving this configuration file via the command `sl config setup`. """ # Copyright (C) 2014 AJ Bourg # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . # # I found the structure of the ec2.py script very helpful as an example # as I put this together. Thanks to whoever wrote that script! 
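# # A sketch of the grouping scheme (values illustrative): each reachable
# instance is keyed by its primary IP and pushed into fact-derived groups:
#
#   {"memory_1024": ["10.0.0.5"], "cpu_2": ["10.0.0.5"],
#    "datacenter_dal05": ["10.0.0.5"], "virtual": ["10.0.0.5"],
#    "_meta": {"hostvars": {"10.0.0.5": {"hostname": "web01"}}}}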
# import SoftLayer import re import argparse import itertools try: import json except: import simplejson as json class SoftLayerInventory(object): common_items = [ 'id', 'globalIdentifier', 'hostname', 'domain', 'fullyQualifiedDomainName', 'primaryBackendIpAddress', 'primaryIpAddress', 'datacenter', 'tagReferences.tag.name', 'userData.value', ] vs_items = [ 'lastKnownPowerState.name', 'powerState', 'maxCpu', 'maxMemory', 'activeTransaction.transactionStatus[friendlyName,name]', 'status', ] hw_items = [ 'hardwareStatusId', 'processorPhysicalCoreAmount', 'memoryCapacity', ] def _empty_inventory(self): return {"_meta": {"hostvars": {}}} def __init__(self): '''Main path''' self.inventory = self._empty_inventory() self.parse_options() if self.args.list: self.get_all_servers() print(self.json_format_dict(self.inventory, True)) elif self.args.host: self.get_virtual_servers() print(self.json_format_dict(self.inventory["_meta"]["hostvars"][self.args.host], True)) def to_safe(self, word): '''Converts 'bad' characters in a string to underscores so they can be used as Ansible groups''' return re.sub(r"[^A-Za-z0-9\-\.]", "_", word) def push(self, my_dict, key, element): '''Push an element onto an array that may not have been defined in the dict''' if key in my_dict: my_dict[key].append(element) else: my_dict[key] = [element] def parse_options(self): '''Parse all the arguments from the CLI''' parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on SoftLayer') parser.add_argument('--list', action='store_true', default=False, help='List instances (default: False)') parser.add_argument('--host', action='store', help='Get all the variables about a specific instance') self.args = parser.parse_args() def json_format_dict(self, data, pretty=False): '''Converts a dict to a JSON object and dumps it as a formatted string''' if pretty: return json.dumps(data, sort_keys=True, indent=2) else: return json.dumps(data) def process_instance(self, instance, instance_type="virtual"): '''Populate the inventory dictionary with any instance information''' # only want active instances if 'status' in instance and instance['status']['name'] != 'Active': return # and powered on instances if 'powerState' in instance and instance['powerState']['name'] != 'Running': return # 5 is active for hardware... 
see https://forums.softlayer.com/forum/softlayer-developer-network/general-discussion/2955-hardwarestatusid if 'hardwareStatusId' in instance and instance['hardwareStatusId'] != 5: return # if there's no IP address, we can't reach it if 'primaryIpAddress' not in instance: return instance['userData'] = instance['userData'][0]['value'] if instance['userData'] else '' dest = instance['primaryIpAddress'] self.inventory["_meta"]["hostvars"][dest] = instance # Inventory: group by memory if 'maxMemory' in instance: self.push(self.inventory, self.to_safe('memory_' + str(instance['maxMemory'])), dest) elif 'memoryCapacity' in instance: self.push(self.inventory, self.to_safe('memory_' + str(instance['memoryCapacity'])), dest) # Inventory: group by cpu count if 'maxCpu' in instance: self.push(self.inventory, self.to_safe('cpu_' + str(instance['maxCpu'])), dest) elif 'processorPhysicalCoreAmount' in instance: self.push(self.inventory, self.to_safe('cpu_' + str(instance['processorPhysicalCoreAmount'])), dest) # Inventory: group by datacenter self.push(self.inventory, self.to_safe('datacenter_' + instance['datacenter']['name']), dest) # Inventory: group by hostname self.push(self.inventory, self.to_safe(instance['hostname']), dest) # Inventory: group by FQDN self.push(self.inventory, self.to_safe(instance['fullyQualifiedDomainName']), dest) # Inventory: group by domain self.push(self.inventory, self.to_safe(instance['domain']), dest) # Inventory: group by type (hardware/virtual) self.push(self.inventory, instance_type, dest) # Inventory: group by tag for tag in instance['tagReferences']: self.push(self.inventory, tag['tag']['name'], dest) def get_virtual_servers(self): '''Get all the CCI instances''' vs = SoftLayer.VSManager(self.client) mask = "mask[%s]" % ','.join(itertools.chain(self.common_items, self.vs_items)) instances = vs.list_instances(mask=mask) for instance in instances: self.process_instance(instance) def get_physical_servers(self): '''Get all the hardware instances''' hw = SoftLayer.HardwareManager(self.client) mask = "mask[%s]" % ','.join(itertools.chain(self.common_items, self.hw_items)) instances = hw.list_hardware(mask=mask) for instance in instances: self.process_instance(instance, 'hardware') def get_all_servers(self): self.client = SoftLayer.Client() self.get_virtual_servers() self.get_physical_servers() SoftLayerInventory() ansible-2.5.1/contrib/inventory/spacewalk.ini0000644000000000000000000000114513265756155021262 0ustar rootroot00000000000000# Put this ini-file in the same directory as spacewalk.py # Command line options have precedence over options defined in here. [spacewalk] # To limit the script on one organization in spacewalk, uncomment org_number # and fill in the organization ID: # org_number=2 # To prefix the group names with the organization ID set prefix_org_name=true. # This is convenient when org_number is not set and you have the same group names # in multiple organizations within spacewalk # The prefix is "org_number-" prefix_org_name=false # Default cache_age for files created with spacewalk-report is 300sec. cache_age=300 ansible-2.5.1/contrib/inventory/spacewalk.py0000755000000000000000000002107613265756155021143 0ustar rootroot00000000000000#!/usr/bin/env python """ Spacewalk external inventory script ================================= Ansible has a feature where instead of reading from /etc/ansible/hosts as a text file, it can query external programs to obtain the list of hosts, groups the hosts are in, and even variables to assign to each host. 
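Typical invocations (illustrative):

    ./spacewalk.py --list
    ./spacewalk.py --list --org 2 -p
    ./spacewalk.py --host host1.example.com --human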
To use this, copy this file over /etc/ansible/hosts and chmod +x the file. This, more or less, allows you to keep one central database containing info about all of your managed instances. This script is dependent upon the spacealk-reports package being installed on the same machine. It is basically a CSV-to-JSON converter from the output of "spacewalk-report system-groups-systems|inventory". Tested with Ansible 1.9.2 and spacewalk 2.3 """ # # Author:: Jon Miller # Copyright:: Copyright (c) 2013, Jon Miller # # Extended for support of multiple organizations and # adding the "_meta" dictionary to --list output by # Bernhard Lichtinger 2015 # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or (at # your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . # from __future__ import print_function import sys import os import time from optparse import OptionParser import subprocess import ConfigParser from six import iteritems try: import json except: import simplejson as json base_dir = os.path.dirname(os.path.realpath(__file__)) default_ini_file = os.path.join(base_dir, "spacewalk.ini") SW_REPORT = '/usr/bin/spacewalk-report' CACHE_DIR = os.path.join(base_dir, ".spacewalk_reports") CACHE_AGE = 300 # 5min INI_FILE = os.path.expanduser(os.path.expandvars(os.environ.get("SPACEWALK_INI_PATH", default_ini_file))) # Sanity check if not os.path.exists(SW_REPORT): print('Error: %s is required for operation.' 
% (SW_REPORT), file=sys.stderr) sys.exit(1) # Pre-startup work if not os.path.exists(CACHE_DIR): os.mkdir(CACHE_DIR) os.chmod(CACHE_DIR, 0o2775) # Helper functions # ------------------------------ def spacewalk_report(name): """Yield a dictionary form of each CSV output produced by the specified spacewalk-report """ cache_filename = os.path.join(CACHE_DIR, name) if not os.path.exists(cache_filename) or \ (time.time() - os.stat(cache_filename).st_mtime) > CACHE_AGE: # Update the cache fh = open(cache_filename, 'w') p = subprocess.Popen([SW_REPORT, name], stdout=fh) p.wait() fh.close() lines = open(cache_filename, 'r').readlines() keys = lines[0].strip().split(',') # add 'spacewalk_' prefix to the keys keys = ['spacewalk_' + key for key in keys] for line in lines[1:]: values = line.strip().split(',') if len(keys) == len(values): yield dict(zip(keys, values)) # Options # ------------------------------ parser = OptionParser(usage="%prog [options] --list | --host ") parser.add_option('--list', default=False, dest="list", action="store_true", help="Produce a JSON consumable grouping of servers for Ansible") parser.add_option('--host', default=None, dest="host", help="Generate additional host specific details for given host for Ansible") parser.add_option('-H', '--human', dest="human", default=False, action="store_true", help="Produce a friendlier version of either server list or host detail") parser.add_option('-o', '--org', default=None, dest="org_number", help="Limit to spacewalk organization number") parser.add_option('-p', default=False, dest="prefix_org_name", action="store_true", help="Prefix the group name with the organization number") (options, args) = parser.parse_args() # read spacewalk.ini if present # ------------------------------ if os.path.exists(INI_FILE): config = ConfigParser.SafeConfigParser() config.read(INI_FILE) if config.has_option('spacewalk', 'cache_age'): CACHE_AGE = config.get('spacewalk', 'cache_age') if not options.org_number and config.has_option('spacewalk', 'org_number'): options.org_number = config.get('spacewalk', 'org_number') if not options.prefix_org_name and config.has_option('spacewalk', 'prefix_org_name'): options.prefix_org_name = config.getboolean('spacewalk', 'prefix_org_name') # Generate dictionary for mapping group_id to org_id # ------------------------------ org_groups = {} try: for group in spacewalk_report('system-groups'): org_groups[group['spacewalk_group_id']] = group['spacewalk_org_id'] except (OSError) as e: print('Problem executing the command "%s system-groups": %s' % (SW_REPORT, str(e)), file=sys.stderr) sys.exit(2) # List out the known server from Spacewalk # ------------------------------ if options.list: # to build the "_meta"-Group with hostvars first create dictionary for later use host_vars = {} try: for item in spacewalk_report('inventory'): host_vars[item['spacewalk_profile_name']] = dict((key, (value.split(';') if ';' in value else value)) for key, value in item.items()) except (OSError) as e: print('Problem executing the command "%s inventory": %s' % (SW_REPORT, str(e)), file=sys.stderr) sys.exit(2) groups = {} meta = {"hostvars": {}} try: for system in spacewalk_report('system-groups-systems'): # first get org_id of system org_id = org_groups[system['spacewalk_group_id']] # shall we add the org_id as prefix to the group name: if options.prefix_org_name: prefix = org_id + "-" group_name = prefix + system['spacewalk_group_name'] else: group_name = system['spacewalk_group_name'] # if we are limited to one organization: if 
options.org_number: if org_id == options.org_number: if group_name not in groups: groups[group_name] = set() groups[group_name].add(system['spacewalk_server_name']) if system['spacewalk_server_name'] in host_vars and not system['spacewalk_server_name'] in meta["hostvars"]: meta["hostvars"][system['spacewalk_server_name']] = host_vars[system['spacewalk_server_name']] # or we list all groups and systems: else: if group_name not in groups: groups[group_name] = set() groups[group_name].add(system['spacewalk_server_name']) if system['spacewalk_server_name'] in host_vars and not system['spacewalk_server_name'] in meta["hostvars"]: meta["hostvars"][system['spacewalk_server_name']] = host_vars[system['spacewalk_server_name']] except (OSError) as e: print('Problem executing the command "%s system-groups-systems": %s' % (SW_REPORT, str(e)), file=sys.stderr) sys.exit(2) if options.human: for group, systems in iteritems(groups): print('[%s]\n%s\n' % (group, '\n'.join(systems))) else: final = dict([(k, list(s)) for k, s in iteritems(groups)]) final["_meta"] = meta print(json.dumps(final)) # print(json.dumps(groups)) sys.exit(0) # Return a details information concerning the spacewalk server # ------------------------------ elif options.host: host_details = {} try: for system in spacewalk_report('inventory'): if system['spacewalk_hostname'] == options.host: host_details = system break except (OSError) as e: print('Problem executing the command "%s inventory": %s' % (SW_REPORT, str(e)), file=sys.stderr) sys.exit(2) if options.human: print('Host: %s' % options.host) for k, v in iteritems(host_details): print(' %s: %s' % (k, '\n '.join(v.split(';')))) else: print(json.dumps(dict((key, (value.split(';') if ';' in value else value)) for key, value in host_details.items()))) sys.exit(0) else: parser.print_help() sys.exit(1) ansible-2.5.1/contrib/inventory/ssh_config.py0000755000000000000000000000772213265756155021315 0ustar rootroot00000000000000#!/usr/bin/env python # (c) 2014, Tomas Karasek # # This file is part of Ansible. # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . # Dynamic inventory script which lets you use aliases from ~/.ssh/config. # # There were some issues with various Paramiko versions. I took a deeper look # and tested heavily. Now, ansible parses this alright with Paramiko versions # 1.7.2 to 1.15.2. # # It prints inventory based on parsed ~/.ssh/config. You can refer to hosts # with their alias, rather than with the IP or hostname. It takes advantage # of the ansible_ssh_{host,port,user,private_key_file}. 
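# # The --list output follows the usual dynamic inventory shape (values
# illustrative):
# {"ssh_config": ["myhost"],
#  "_meta": {"hostvars": {"myhost": {"ansible_ssh_host": "203.0.113.10",
#                                    "ansible_ssh_user": "me"}}}}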
# # If you have in your .ssh/config: # Host git # HostName git.domain.org # User tkarasek # IdentityFile /home/tomk/keys/thekey # # You can do # $ ansible git -m ping # # Example invocation: # ssh_config.py --list # ssh_config.py --host import argparse import os.path import sys from collections import MutableSequence try: import json except ImportError: import simplejson as json import paramiko SSH_CONF = '~/.ssh/config' _key = 'ssh_config' _ssh_to_ansible = [('user', 'ansible_ssh_user'), ('hostname', 'ansible_ssh_host'), ('identityfile', 'ansible_ssh_private_key_file'), ('port', 'ansible_ssh_port')] def get_config(): if not os.path.isfile(os.path.expanduser(SSH_CONF)): return {} with open(os.path.expanduser(SSH_CONF)) as f: cfg = paramiko.SSHConfig() cfg.parse(f) ret_dict = {} for d in cfg._config: if isinstance(d['host'], MutableSequence): alias = d['host'][0] else: alias = d['host'] if ('?' in alias) or ('*' in alias): continue _copy = dict(d) del _copy['host'] if 'config' in _copy: ret_dict[alias] = _copy['config'] else: ret_dict[alias] = _copy return ret_dict def print_list(): cfg = get_config() meta = {'hostvars': {}} for alias, attributes in cfg.items(): tmp_dict = {} for ssh_opt, ans_opt in _ssh_to_ansible: if ssh_opt in attributes: # If the attribute is a list, just take the first element. # Private key is returned in a list for some reason. attr = attributes[ssh_opt] if isinstance(attr, MutableSequence): attr = attr[0] tmp_dict[ans_opt] = attr if tmp_dict: meta['hostvars'][alias] = tmp_dict print(json.dumps({_key: list(set(meta['hostvars'].keys())), '_meta': meta})) def print_host(host): cfg = get_config() print(json.dumps(cfg[host])) def get_args(args_list): parser = argparse.ArgumentParser( description='ansible inventory script parsing .ssh/config') mutex_group = parser.add_mutually_exclusive_group(required=True) help_list = 'list all hosts from .ssh/config inventory' mutex_group.add_argument('--list', action='store_true', help=help_list) help_host = 'display variables for a host' mutex_group.add_argument('--host', help=help_host) return parser.parse_args(args_list) def main(args_list): args = get_args(args_list) if args.list: print_list() if args.host: print_host(args.host) if __name__ == '__main__': main(sys.argv[1:]) ansible-2.5.1/contrib/inventory/stacki.py0000755000000000000000000001421613265756155020445 0ustar rootroot00000000000000#!/usr/bin/env python # Copyright (c) 2016, Hugh Ma # # This module is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this software. If not, see . # Stacki inventory script # Configure stacki.yml with proper auth information and place in the following: # - ../inventory/stacki.yml # - /etc/stacki/stacki.yml # - /etc/ansible/stacki.yml # The stacki.yml file can contain entries for authentication information # regarding the Stacki front-end node. 
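# # The generated inventory places every machine in 'all' and in either
# 'frontends' or 'backends' according to its appliance type, with per-host
# facts under '_meta'/'hostvars' (see format_meta below).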
# # use_hostnames uses hostname rather than interface ip as connection # # """ Example Usage: List Stacki Nodes $ ./stack.py --list Example Configuration: --- stacki: auth: stacki_user: admin stacki_password: abc12345678910 stacki_endpoint: http://192.168.200.50/stack use_hostnames: false """ import argparse import os import sys import yaml from distutils.version import StrictVersion try: import json except: import simplejson as json try: import requests except: sys.exit('requests package is required for this inventory script') CONFIG_FILES = ['/etc/stacki/stacki.yml', '/etc/ansible/stacki.yml'] def stack_auth(params): endpoint = params['stacki_endpoint'] auth_creds = {'USERNAME': params['stacki_user'], 'PASSWORD': params['stacki_password']} client = requests.session() client.get(endpoint) init_csrf = client.cookies['csrftoken'] header = {'csrftoken': init_csrf, 'X-CSRFToken': init_csrf, 'Content-type': 'application/x-www-form-urlencoded'} login_endpoint = endpoint + "/login" login_req = client.post(login_endpoint, data=auth_creds, headers=header) csrftoken = login_req.cookies['csrftoken'] sessionid = login_req.cookies['sessionid'] auth_creds.update(CSRFTOKEN=csrftoken, SESSIONID=sessionid) return client, auth_creds def stack_build_header(auth_creds): header = {'csrftoken': auth_creds['CSRFTOKEN'], 'X-CSRFToken': auth_creds['CSRFTOKEN'], 'sessionid': auth_creds['SESSIONID'], 'Content-type': 'application/json'} return header def stack_host_list(endpoint, header, client): stack_r = client.post(endpoint, data=json.dumps({"cmd": "list host"}), headers=header) return json.loads(stack_r.json()) def stack_net_list(endpoint, header, client): stack_r = client.post(endpoint, data=json.dumps({"cmd": "list host interface"}), headers=header) return json.loads(stack_r.json()) def format_meta(hostdata, intfdata, config): use_hostnames = config['use_hostnames'] meta = dict(all=dict(hosts=list()), frontends=dict(hosts=list()), backends=dict(hosts=list()), _meta=dict(hostvars=dict())) # Iterate through list of dicts of hosts and remove # environment key as it causes conflicts for host in hostdata: del host['environment'] meta['_meta']['hostvars'][host['host']] = host meta['_meta']['hostvars'][host['host']]['interfaces'] = list() # @bbyhuy to improve readability in next iteration for intf in intfdata: if intf['host'] in meta['_meta']['hostvars']: meta['_meta']['hostvars'][intf['host']]['interfaces'].append(intf) if intf['default'] is True: meta['_meta']['hostvars'][intf['host']]['ansible_host'] = intf['ip'] if not use_hostnames: meta['all']['hosts'].append(intf['ip']) if meta['_meta']['hostvars'][intf['host']]['appliance'] != 'frontend': meta['backends']['hosts'].append(intf['ip']) else: meta['frontends']['hosts'].append(intf['ip']) else: meta['all']['hosts'].append(intf['host']) if meta['_meta']['hostvars'][intf['host']]['appliance'] != 'frontend': meta['backends']['hosts'].append(intf['host']) else: meta['frontends']['hosts'].append(intf['host']) return meta def parse_args(): parser = argparse.ArgumentParser(description='Stacki Inventory Module') group = parser.add_mutually_exclusive_group(required=True) group.add_argument('--list', action='store_true', help='List active hosts') group.add_argument('--host', help='List details about the specific host') return parser.parse_args() def main(): args = parse_args() if StrictVersion(requests.__version__) < StrictVersion("2.4.3"): sys.exit('requests>=2.4.3 is required for this inventory script') try: config_files = CONFIG_FILES 
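# Search order (sketch): /etc/stacki/stacki.yml, /etc/ansible/stacki.yml,
# then a stacki.yml next to this script; the first existing file wins.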
config_files.append(os.path.dirname(os.path.realpath(__file__)) + '/stacki.yml') config = None for cfg_file in config_files: if os.path.isfile(cfg_file): stream = open(cfg_file, 'r') config = yaml.safe_load(stream) break if not config: sys.stderr.write("No config file found at {0}\n".format(config_files)) sys.exit(1) client, auth_creds = stack_auth(config['stacki']['auth']) header = stack_build_header(auth_creds) host_list = stack_host_list(config['stacki']['auth']['stacki_endpoint'], header, client) intf_list = stack_net_list(config['stacki']['auth']['stacki_endpoint'], header, client) final_meta = format_meta(host_list, intf_list, config) print(json.dumps(final_meta, indent=4)) except Exception as e: sys.stderr.write('%s\n' % e.message) sys.exit(1) sys.exit(0) if __name__ == '__main__': main() ansible-2.5.1/contrib/inventory/stacki.yml0000644000000000000000000000026113265756155020606 0ustar rootroot00000000000000--- stacki: auth: stacki_user: admin stacki_password: GhYgWut1hfGbbnstmbW3m-bJbeME-3EvC20rF1LHrDM stacki_endpoint: http://192.168.200.50/stack use_hostnames: falseansible-2.5.1/contrib/inventory/vagrant.py0000755000000000000000000001004513265756155020625 0ustar rootroot00000000000000#!/usr/bin/env python """ Vagrant external inventory script. Automatically finds the IP of the booted vagrant vm(s), and returns it under the host group 'vagrant' Example Vagrant configuration using this script: config.vm.provision :ansible do |ansible| ansible.playbook = "./provision/your_playbook.yml" ansible.inventory_file = "./provision/inventory/vagrant.py" ansible.verbose = true end """ # Copyright (C) 2013 Mark Mandel # 2015 Igor Khomyakov # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . # # Thanks to the spacewalk.py inventory script for giving me the basic structure # of this. # import sys import os.path import subprocess import re from paramiko import SSHConfig from optparse import OptionParser from collections import defaultdict try: import json except Exception: import simplejson as json from ansible.module_utils._text import to_text from ansible.module_utils.six.moves import StringIO _group = 'vagrant' # a default group _ssh_to_ansible = [('user', 'ansible_ssh_user'), ('hostname', 'ansible_ssh_host'), ('identityfile', 'ansible_ssh_private_key_file'), ('port', 'ansible_ssh_port')] # Options # ------------------------------ parser = OptionParser(usage="%prog [options] --list | --host ") parser.add_option('--list', default=False, dest="list", action="store_true", help="Produce a JSON consumable grouping of Vagrant servers for Ansible") parser.add_option('--host', default=None, dest="host", help="Generate additional host specific details for given host for Ansible") (options, args) = parser.parse_args() # # helper functions # # get all the ssh configs for all boxes in an array of dictionaries. 
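# e.g. (illustrative):
# {'default': {'ansible_ssh_user': 'vagrant',
#              'ansible_ssh_host': '127.0.0.1',
#              'ansible_ssh_port': '2222',
#              'ansible_ssh_private_key_file': '~/.vagrant.d/insecure_private_key'}}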
def get_ssh_config(): return dict((k, get_a_ssh_config(k)) for k in list_running_boxes()) # list all the running boxes def list_running_boxes(): output = to_text(subprocess.check_output(["vagrant", "status"]), errors='surrogate_or_strict').split('\n') boxes = [] for line in output: matcher = re.search(r"([^\s]+)[\s]+running \(.+", line) if matcher: boxes.append(matcher.group(1)) return boxes # get the ssh config for a single box def get_a_ssh_config(box_name): """Gives back a map of all the machine's ssh configurations""" output = to_text(subprocess.check_output(["vagrant", "ssh-config", box_name]), errors='surrogate_or_strict') config = SSHConfig() config.parse(StringIO(output)) host_config = config.lookup(box_name) # man 5 ssh_config: # > It is possible to have multiple identity files ... # > all these identities will be tried in sequence. for id in host_config['identityfile']: if os.path.isfile(id): host_config['identityfile'] = id return dict((v, host_config[k]) for k, v in _ssh_to_ansible) # List out servers that vagrant has running # ------------------------------ if options.list: ssh_config = get_ssh_config() meta = defaultdict(dict) for host in ssh_config: meta['hostvars'][host] = ssh_config[host] print(json.dumps({_group: list(ssh_config.keys()), '_meta': meta})) sys.exit(0) # Get out the host details # ------------------------------ elif options.host: print(json.dumps(get_a_ssh_config(options.host))) sys.exit(0) # Print out help # ------------------------------ else: parser.print_help() sys.exit(0) ansible-2.5.1/contrib/inventory/vbox.py0000755000000000000000000000633613265756155020151 0ustar rootroot00000000000000#!/usr/bin/env python # This file is part of Ansible, # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
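# Usage sketch (illustrative):
#   ./vbox.py             -> full inventory, grouped by VirtualBox "Groups"
#   ./vbox.py --host myvm -> variables for the single VM "myvm"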
import sys from subprocess import Popen, PIPE try: import json except ImportError: import simplejson as json class SetEncoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, set): return list(obj) return json.JSONEncoder.default(self, obj) VBOX = "VBoxManage" def get_hosts(host=None): returned = {} try: if host: p = Popen([VBOX, 'showvminfo', host], stdout=PIPE) else: returned = {'all': set(), '_metadata': {}} p = Popen([VBOX, 'list', '-l', 'vms'], stdout=PIPE) except: sys.exit(1) hostvars = {} prevkey = pref_k = '' for line in p.stdout.readlines(): try: k, v = line.split(':', 1) except: continue if k == '': continue v = v.strip() if k.startswith('Name'): if v not in hostvars: curname = v hostvars[curname] = {} try: # try to get network info x = Popen([VBOX, 'guestproperty', 'get', curname, "/VirtualBox/GuestInfo/Net/0/V4/IP"], stdout=PIPE) ipinfo = x.stdout.read() if 'Value' in ipinfo: a, ip = ipinfo.split(':', 1) hostvars[curname]['ansible_ssh_host'] = ip.strip() except: pass continue if not host: if k == 'Groups': for group in v.split('/'): if group: if group not in returned: returned[group] = set() returned[group].add(curname) returned['all'].add(curname) continue pref_k = 'vbox_' + k.strip().replace(' ', '_') if k.startswith(' '): if prevkey not in hostvars[curname]: hostvars[curname][prevkey] = {} hostvars[curname][prevkey][pref_k] = v else: if v != '': hostvars[curname][pref_k] = v prevkey = pref_k if not host: returned['_metadata']['hostvars'] = hostvars else: returned = hostvars[host] return returned if __name__ == '__main__': inventory = {} hostname = None if len(sys.argv) > 1: if sys.argv[1] == "--host": hostname = sys.argv[2] if hostname: inventory = get_hosts(hostname) else: inventory = get_hosts() sys.stdout.write(json.dumps(inventory, indent=2, cls=SetEncoder)) ansible-2.5.1/contrib/inventory/vmware.ini0000644000000000000000000000334313265756155020613 0ustar rootroot00000000000000# Ansible VMware external inventory script settings [defaults] # If true (the default), return only guest VMs. If false, also return host # systems in the results. guests_only = True # Specify an alternate group name for guest VMs. If not defined, defaults to # the basename of the inventory script + "_vm", e.g. "vmware_vm". #vm_group = vm_group_name # Specify an alternate group name for host systems when guests_only=false. # If not defined, defaults to the basename of the inventory script + "_hw", # e.g. "vmware_hw". #hw_group = hw_group_name # Specify the number of seconds to use the inventory cache before it is # considered stale. If not defined, defaults to 0 seconds. #cache_max_age = 3600 # Specify the directory used for storing the inventory cache. If not defined, # caching will be disabled. #cache_dir = ~/.cache/ansible # Specify a prefix filter. Any VMs with names beginning with this string will # not be returned. # prefix_filter = test_ # Specify a cluster filter list (colon delimited). Only clusters matching by # name will be scanned for virtualmachines #clusters = cluster1,cluster2 [auth] # Specify hostname or IP address of vCenter/ESXi server. A port may be # included with the hostname, e.g.: vcenter.example.com:8443. This setting # may also be defined via the VMWARE_HOST environment variable. host = vcenter.example.com # Specify a username to access the vCenter host. This setting may also be # defined with the VMWARE_USER environment variable. user = ihasaccess # Specify a password to access the vCenter host. 
This setting may also be # defined with the VMWARE_PASSWORD environment variable. password = ssshverysecret # Force SSL certificate checking by default or ignore self-signed certs. #sslcheck=True ansible-2.5.1/contrib/inventory/vmware.py0000755000000000000000000004405413265756155020473 0ustar rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- ''' VMware Inventory Script ======================= Retrieve information about virtual machines from a vCenter server or standalone ESX host. When `group_by=false` (in the INI file), host systems are also returned in addition to VMs. This script will attempt to read configuration from an INI file with the same base filename if present, or `vmware.ini` if not. It is possible to create symlinks to the inventory script to support multiple configurations, e.g.: * `vmware.py` (this script) * `vmware.ini` (default configuration, will be read by `vmware.py`) * `vmware_test.py` (symlink to `vmware.py`) * `vmware_test.ini` (test configuration, will be read by `vmware_test.py`) * `vmware_other.py` (symlink to `vmware.py`, will read `vmware.ini` since no `vmware_other.ini` exists) The path to an INI file may also be specified via the `VMWARE_INI` environment variable, in which case the filename matching rules above will not apply. Host and authentication parameters may be specified via the `VMWARE_HOST`, `VMWARE_USER` and `VMWARE_PASSWORD` environment variables; these options will take precedence over options present in the INI file. An INI file is not required if these options are specified using environment variables. ''' from __future__ import print_function import collections import json import logging import optparse import os import ssl import sys import time from six import integer_types, text_type, string_types from six.moves import configparser # Disable logging message trigged by pSphere/suds. try: from logging import NullHandler except ImportError: from logging import Handler class NullHandler(Handler): def emit(self, record): pass logging.getLogger('psphere').addHandler(NullHandler()) logging.getLogger('suds').addHandler(NullHandler()) from psphere.client import Client from psphere.errors import ObjectNotFoundError from psphere.managedobjects import HostSystem, VirtualMachine, ManagedObject, Network, ClusterComputeResource from suds.sudsobject import Object as SudsObject class VMwareInventory(object): def __init__(self, guests_only=None): self.config = configparser.SafeConfigParser() if os.environ.get('VMWARE_INI', ''): config_files = [os.environ['VMWARE_INI']] else: config_files = [os.path.abspath(sys.argv[0]).rstrip('.py') + '.ini', 'vmware.ini'] for config_file in config_files: if os.path.exists(config_file): self.config.read(config_file) break # Retrieve only guest VMs, or include host systems? if guests_only is not None: self.guests_only = guests_only elif self.config.has_option('defaults', 'guests_only'): self.guests_only = self.config.getboolean('defaults', 'guests_only') else: self.guests_only = True # Read authentication information from VMware environment variables # (if set), otherwise from INI file. 
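# Environment variables take precedence over the INI [auth] section, e.g.
# (illustrative) VMWARE_HOST=vcenter.example.com overrides "host" in the INI.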
auth_host = os.environ.get('VMWARE_HOST') if not auth_host and self.config.has_option('auth', 'host'): auth_host = self.config.get('auth', 'host') auth_user = os.environ.get('VMWARE_USER') if not auth_user and self.config.has_option('auth', 'user'): auth_user = self.config.get('auth', 'user') auth_password = os.environ.get('VMWARE_PASSWORD') if not auth_password and self.config.has_option('auth', 'password'): auth_password = self.config.get('auth', 'password') sslcheck = os.environ.get('VMWARE_SSLCHECK') if not sslcheck and self.config.has_option('auth', 'sslcheck'): sslcheck = self.config.get('auth', 'sslcheck') if not sslcheck: sslcheck = True else: if sslcheck.lower() in ['no', 'false']: sslcheck = False else: sslcheck = True # Limit the clusters being scanned self.filter_clusters = os.environ.get('VMWARE_CLUSTERS') if not self.filter_clusters and self.config.has_option('defaults', 'clusters'): self.filter_clusters = self.config.get('defaults', 'clusters') if self.filter_clusters: self.filter_clusters = [x.strip() for x in self.filter_clusters.split(',') if x.strip()] # Override certificate checks if not sslcheck: if hasattr(ssl, '_create_unverified_context'): ssl._create_default_https_context = ssl._create_unverified_context # Create the VMware client connection. self.client = Client(auth_host, auth_user, auth_password) def _put_cache(self, name, value): ''' Saves the value to cache with the name given. ''' if self.config.has_option('defaults', 'cache_dir'): cache_dir = os.path.expanduser(self.config.get('defaults', 'cache_dir')) if not os.path.exists(cache_dir): os.makedirs(cache_dir) cache_file = os.path.join(cache_dir, name) with open(cache_file, 'w') as cache: json.dump(value, cache) def _get_cache(self, name, default=None): ''' Retrieves the value from cache for the given name. ''' if self.config.has_option('defaults', 'cache_dir'): cache_dir = self.config.get('defaults', 'cache_dir') cache_file = os.path.join(cache_dir, name) if os.path.exists(cache_file): if self.config.has_option('defaults', 'cache_max_age'): cache_max_age = self.config.getint('defaults', 'cache_max_age') else: cache_max_age = 0 cache_stat = os.stat(cache_file) if (cache_stat.st_mtime + cache_max_age) >= time.time(): with open(cache_file) as cache: return json.load(cache) return default def _flatten_dict(self, d, parent_key='', sep='_'): ''' Flatten nested dicts by combining keys with a separator. Lists with only string items are included as is; any other lists are discarded. ''' items = [] for k, v in d.items(): if k.startswith('_'): continue new_key = parent_key + sep + k if parent_key else k if isinstance(v, collections.MutableMapping): items.extend(self._flatten_dict(v, new_key, sep).items()) elif isinstance(v, (list, tuple)): if all([isinstance(x, string_types) for x in v]): items.append((new_key, v)) else: items.append((new_key, v)) return dict(items) def _get_obj_info(self, obj, depth=99, seen=None): ''' Recursively build a data structure for the given pSphere object (depth only applies to ManagedObject instances). 
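Cycles between ManagedObject instances are broken by tracking visited
objects in the seen set; values that cannot be serialized collapse to an
empty tuple and are skipped.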
''' seen = seen or set() if isinstance(obj, ManagedObject): try: obj_unicode = text_type(getattr(obj, 'name')) except AttributeError: obj_unicode = () if obj in seen: return obj_unicode seen.add(obj) if depth <= 0: return obj_unicode d = {} for attr in dir(obj): if attr.startswith('_'): continue try: val = getattr(obj, attr) obj_info = self._get_obj_info(val, depth - 1, seen) if obj_info != (): d[attr] = obj_info except Exception as e: pass return d elif isinstance(obj, SudsObject): d = {} for key, val in iter(obj): obj_info = self._get_obj_info(val, depth, seen) if obj_info != (): d[key] = obj_info return d elif isinstance(obj, (list, tuple)): l = [] for val in iter(obj): obj_info = self._get_obj_info(val, depth, seen) if obj_info != (): l.append(obj_info) return l elif isinstance(obj, (type(None), bool, float) + string_types + integer_types): return obj else: return () def _get_host_info(self, host, prefix='vmware'): ''' Return a flattened dict with info about the given host system. ''' host_info = { 'name': host.name, } for attr in ('datastore', 'network', 'vm'): try: value = getattr(host, attr) host_info['%ss' % attr] = self._get_obj_info(value, depth=0) except AttributeError: host_info['%ss' % attr] = [] for k, v in self._get_obj_info(host.summary, depth=0).items(): if isinstance(v, collections.MutableMapping): for k2, v2 in v.items(): host_info[k2] = v2 elif k != 'host': host_info[k] = v try: host_info['ipAddress'] = host.config.network.vnic[0].spec.ip.ipAddress except Exception as e: print(e, file=sys.stderr) host_info = self._flatten_dict(host_info, prefix) if ('%s_ipAddress' % prefix) in host_info: host_info['ansible_ssh_host'] = host_info['%s_ipAddress' % prefix] return host_info def _get_vm_info(self, vm, prefix='vmware'): ''' Return a flattened dict with info about the given virtual machine. ''' vm_info = { 'name': vm.name, } for attr in ('datastore', 'network'): try: value = getattr(vm, attr) vm_info['%ss' % attr] = self._get_obj_info(value, depth=0) except AttributeError: vm_info['%ss' % attr] = [] try: vm_info['resourcePool'] = self._get_obj_info(vm.resourcePool, depth=0) except AttributeError: vm_info['resourcePool'] = '' try: vm_info['guestState'] = vm.guest.guestState except AttributeError: vm_info['guestState'] = '' for k, v in self._get_obj_info(vm.summary, depth=0).items(): if isinstance(v, collections.MutableMapping): for k2, v2 in v.items(): if k2 == 'host': k2 = 'hostSystem' vm_info[k2] = v2 elif k != 'vm': vm_info[k] = v vm_info = self._flatten_dict(vm_info, prefix) if ('%s_ipAddress' % prefix) in vm_info: vm_info['ansible_ssh_host'] = vm_info['%s_ipAddress' % prefix] return vm_info def _add_host(self, inv, parent_group, host_name): ''' Add the host to the parent group in the given inventory. ''' p_group = inv.setdefault(parent_group, []) if isinstance(p_group, dict): group_hosts = p_group.setdefault('hosts', []) else: group_hosts = p_group if host_name not in group_hosts: group_hosts.append(host_name) def _add_child(self, inv, parent_group, child_group): ''' Add a child group to a parent group in the given inventory. ''' if parent_group != 'all': p_group = inv.setdefault(parent_group, {}) if not isinstance(p_group, dict): inv[parent_group] = {'hosts': p_group} p_group = inv[parent_group] group_children = p_group.setdefault('children', []) if child_group not in group_children: group_children.append(child_group) inv.setdefault(child_group, []) def get_inventory(self, meta_hostvars=True): ''' Reads the inventory from cache or VMware API via pSphere. 
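Besides 'all', the result holds the vm group (and the hw group when host
systems are included) plus derived groups such as resource_pools,
datastores, networks, guests and templates, and a '_meta'/'hostvars'
section when meta_hostvars is True.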
''' # Use different cache names for guests only vs. all hosts. if self.guests_only: cache_name = '__inventory_guests__' else: cache_name = '__inventory_all__' inv = self._get_cache(cache_name, None) if inv is not None: return inv inv = {'all': {'hosts': []}} if meta_hostvars: inv['_meta'] = {'hostvars': {}} default_group = os.path.basename(sys.argv[0]).rstrip('.py') if not self.guests_only: if self.config.has_option('defaults', 'hw_group'): hw_group = self.config.get('defaults', 'hw_group') else: hw_group = default_group + '_hw' if self.config.has_option('defaults', 'vm_group'): vm_group = self.config.get('defaults', 'vm_group') else: vm_group = default_group + '_vm' if self.config.has_option('defaults', 'prefix_filter'): prefix_filter = self.config.get('defaults', 'prefix_filter') else: prefix_filter = None if self.filter_clusters: # Loop through clusters and find hosts: hosts = [] for cluster in ClusterComputeResource.all(self.client): if cluster.name in self.filter_clusters: for host in cluster.host: hosts.append(host) else: # Get list of all physical hosts hosts = HostSystem.all(self.client) # Loop through physical hosts: for host in hosts: if not self.guests_only: self._add_host(inv, 'all', host.name) self._add_host(inv, hw_group, host.name) host_info = self._get_host_info(host) if meta_hostvars: inv['_meta']['hostvars'][host.name] = host_info self._put_cache(host.name, host_info) # Loop through all VMs on physical host. for vm in host.vm: if prefix_filter: if vm.name.startswith(prefix_filter): continue self._add_host(inv, 'all', vm.name) self._add_host(inv, vm_group, vm.name) vm_info = self._get_vm_info(vm) if meta_hostvars: inv['_meta']['hostvars'][vm.name] = vm_info self._put_cache(vm.name, vm_info) # Group by resource pool. vm_resourcePool = vm_info.get('vmware_resourcePool', None) if vm_resourcePool: self._add_child(inv, vm_group, 'resource_pools') self._add_child(inv, 'resource_pools', vm_resourcePool) self._add_host(inv, vm_resourcePool, vm.name) # Group by datastore. for vm_datastore in vm_info.get('vmware_datastores', []): self._add_child(inv, vm_group, 'datastores') self._add_child(inv, 'datastores', vm_datastore) self._add_host(inv, vm_datastore, vm.name) # Group by network. for vm_network in vm_info.get('vmware_networks', []): self._add_child(inv, vm_group, 'networks') self._add_child(inv, 'networks', vm_network) self._add_host(inv, vm_network, vm.name) # Group by guest OS. vm_guestId = vm_info.get('vmware_guestId', None) if vm_guestId: self._add_child(inv, vm_group, 'guests') self._add_child(inv, 'guests', vm_guestId) self._add_host(inv, vm_guestId, vm.name) # Group all VM templates. vm_template = vm_info.get('vmware_template', False) if vm_template: self._add_child(inv, vm_group, 'templates') self._add_host(inv, 'templates', vm.name) self._put_cache(cache_name, inv) return inv def get_host(self, hostname): ''' Read info about a specific host or VM from cache or VMware API. 
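The get_inventory method above leans entirely on _add_host and _add_child; a toy demonstration with simplified re-implementations (no duplicate checks, hypothetical names) shows the structure they emit:

def add_host(inv, group, host):
    inv.setdefault(group, {'hosts': []})['hosts'].append(host)

def add_child(inv, parent, child):
    inv.setdefault(parent, {}).setdefault('children', []).append(child)
    inv.setdefault(child, {'hosts': []})

inv = {'all': {'hosts': []}}
add_host(inv, 'all', 'vm01')                 # hypothetical VM name
add_child(inv, 'vmware_vm', 'datastores')
add_child(inv, 'datastores', 'datastore1')
add_host(inv, 'datastore1', 'vm01')
# inv now maps group names to {'hosts': [...]} / {'children': [...]} dicts:
# the JSON shape Ansible expects from a dynamic inventory's --list output.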
''' inv = self._get_cache(hostname, None) if inv is not None: return inv if not self.guests_only: try: host = HostSystem.get(self.client, name=hostname) inv = self._get_host_info(host) except ObjectNotFoundError: pass if inv is None: try: vm = VirtualMachine.get(self.client, name=hostname) inv = self._get_vm_info(vm) except ObjectNotFoundError: pass if inv is not None: self._put_cache(hostname, inv) return inv or {} def main(): parser = optparse.OptionParser() parser.add_option('--list', action='store_true', dest='list', default=False, help='Output inventory groups and hosts') parser.add_option('--host', dest='host', default=None, metavar='HOST', help='Output variables only for the given hostname') # Additional options for use when running the script standalone, but never # used by Ansible. parser.add_option('--pretty', action='store_true', dest='pretty', default=False, help='Output nicely-formatted JSON') parser.add_option('--include-host-systems', action='store_true', dest='include_host_systems', default=False, help='Include host systems in addition to VMs') parser.add_option('--no-meta-hostvars', action='store_false', dest='meta_hostvars', default=True, help='Exclude [\'_meta\'][\'hostvars\'] with --list') options, args = parser.parse_args() if options.include_host_systems: vmware_inventory = VMwareInventory(guests_only=False) else: vmware_inventory = VMwareInventory() if options.host is not None: inventory = vmware_inventory.get_host(options.host) else: inventory = vmware_inventory.get_inventory(options.meta_hostvars) json_kwargs = {} if options.pretty: json_kwargs.update({'indent': 4, 'sort_keys': True}) json.dump(inventory, sys.stdout, **json_kwargs) if __name__ == '__main__': main() ansible-2.5.1/contrib/inventory/vmware_inventory.ini0000644000000000000000000001171713265756155022734 0ustar rootroot00000000000000# Ansible VMware external inventory script settings [vmware] # The resolvable hostname or ip address of the vsphere server=vcenter # The port for the vsphere API #port=443 # The username with access to the vsphere API. This setting # may also be defined via the VMWARE_USERNAME environment variable. username=administrator@vsphere.local # The password for the vsphere API. This setting # may also be defined via the VMWARE_PASSWORD environment variable. password=vmware # Verify the server's SSL certificate #validate_certs = True # Specify the number of seconds to use the inventory cache before it is # considered stale. If not defined, defaults to 0 seconds. #cache_max_age = 3600 # Specify the directory used for storing the inventory cache. If not defined, # caching will be disabled. #cache_path = ~/.cache/ansible # Max object level refers to the level of recursion the script will delve into # the objects returned from pyvomi to find serializable facts. The default # level of 0 is sufficient for most tasks and will be the most performant. # Beware that the recursion can exceed python's limit (causing traceback), # cause sluggish script performance and return huge blobs of facts. # If you do not know what you are doing, leave this set to 1. #max_object_level=1 # Lower the keynames for facts to make addressing them easier. #lower_var_keys=True # Don't retrieve and process some VMware attribute keys # Default values permit to sanitize inventory meta and to improve a little bit # performance by removing non-common group attributes. 
#skip_keys = declaredalarmstate,disabledmethod,dynamicproperty,dynamictype,environmentbrowser,managedby,parent,childtype,resourceconfig # Host alias for objects in the inventory. VMWare allows duplicate VM names # so they can not be considered unique. Use this setting to alter the alias # returned for the hosts. Any attributes of the guest can be used to build # this alias. The default combines the config name and the config uuid and # expects that the ansible_host will be set by the host_pattern. #alias_pattern={{ config.name + '_' + config.uuid }} # Host pattern is the value set for ansible_host and ansible_ssh_host, which # needs to be a hostname or ip address the ansible control host can reach. #host_pattern={{ guest.ipaddress }} # Host filters are a comma separated list of jinja patterns that remove # non-matching hosts from the final result. # EXAMPLES: # host_filters={{ config.guestid == 'rhel7_64Guest' }} # host_filters={{ config.cpuhotremoveenabled != False }},{{ runtime.maxmemoryusage >= 512 }} # host_filters={{ runtime.powerstate == "poweredOn" }} # host_filters={{ guest.gueststate == "notRunning" }} # The default filter keeps virtual machines whose powerstate equals "poweredOn". (Changed in version 2.5) # Unlike "guest.gueststate", checking runtime state does not require VMware tools to be installed. #host_filters={{ runtime.powerstate == "poweredOn" }} # Groupby patterns enable the user to create groups via any possible jinja # expression. The resulting value will be the group name and the host will be added # to that group. Be careful not to write expressions that simply return True/False, # because those values will become the literal group name. The patterns can be # comma delimited to create as many groups as necessary. #groupby_patterns={{ guest.guestid }},{{ 'templates' if config.template else 'guests'}} # Group by custom fields will use VMware custom fields to generate hostgroups # based on {{ custom_field_group_prefix }} + field_name + _ + field_value # Setting groupby_custom_field to True enables this feature. # If a custom field value is comma separated, multiple groups are created. # Warning: This requires max_object_level to be set to 2 or greater. #groupby_custom_field = False # You can customize the prefix used for custom field hostgroup generation here. # The vmware_tag_ prefix is the default and is consistent with ec2_tag_. #custom_field_group_prefix = 'vmware_tag_' # The script attempts to recurse into virtualmachine objects and serialize # all available data. The serialization is comprehensive but slow. If the # vcenter environment is large and the desired properties are known, create # a 'properties' section in this config and make an arbitrary list of # key=value settings where the value is a path to a specific property. # If this feature is enabled, be sure to fetch every property that is used # in the jinja expressions defined above.
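Both host_filters and groupby_patterns are plain Jinja2 expressions rendered against each guest's collected facts. A self-contained sketch of that evaluation (the sample facts are hypothetical):

from jinja2 import Environment

facts = {'runtime': {'powerstate': 'poweredOn'},
         'config': {'template': False},
         'guest': {'guestid': 'rhel7_64Guest'}}

env = Environment()
keep = env.from_string('{{ runtime.powerstate == "poweredOn" }}').render(facts)
group = env.from_string("{{ 'templates' if config.template else 'guests' }}").render(facts)
print(keep, group)   # -> True guests

A host passes a filter when the expression renders to a true value; a groupby pattern's rendered string becomes the group name, which is why bare True/False expressions make poor groupby patterns.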
For performance tuning, reduce # the number of properties to the smallest amount possible and limit the # use of properties that are not direct attributes of vim.VirtualMachine. #[properties] #prop01=name #prop02=config.cpuHotAddEnabled #prop03=config.cpuHotRemoveEnabled #prop04=config.instanceUuid #prop05=config.hardware.numCPU #prop06=config.template #prop07=config.name #prop08=guest.hostName #prop09=guest.ipAddress #prop10=guest.guestId #prop11=guest.guestState #prop12=runtime.maxMemoryUsage ansible-2.5.1/contrib/inventory/vmware_inventory.py0000755000000000000000000006610213265756155022606 0ustar rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright (C): 2017, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # Requirements # - pyvmomi >= 6.0.0.2016.4 # TODO: # * more jq examples # * optional folder hierarchy """ $ jq '._meta.hostvars[].config' data.json | head { "alternateguestname": "", "instanceuuid": "5035a5cd-b8e8-d717-e133-2d383eb0d675", "memoryhotaddenabled": false, "guestfullname": "Red Hat Enterprise Linux 7 (64-bit)", "changeversion": "2016-05-16T18:43:14.977925Z", "uuid": "4235fc97-5ddb-7a17-193b-9a3ac97dc7b4", "cpuhotremoveenabled": false, "vpmcenabled": false, "firmware": "bios", """ from __future__ import print_function import atexit import datetime import itertools import json import os import re import ssl import sys import uuid from time import time import six from jinja2 import Environment from six import integer_types, string_types from six.moves import configparser try: import argparse except ImportError: sys.exit('Error: This inventory script requires the "argparse" Python module. Please install it or upgrade to Python 2.7.') try: from pyVmomi import vim, vmodl from pyVim.connect import SmartConnect, Disconnect except ImportError: sys.exit("ERROR: This inventory script requires the 'pyVmomi' Python module and was unable to load it") def regex_match(s, pattern): '''Custom filter for regex matching''' reg = re.compile(pattern) if reg.match(s): return True else: return False def select_chain_match(inlist, key, pattern): '''Get a key from a list of dicts, squash values to a single list, then filter''' outlist = [x[key] for x in inlist] outlist = list(itertools.chain(*outlist)) outlist = [x for x in outlist if regex_match(x, pattern)] return outlist class VMwareMissingHostException(Exception): pass class VMWareInventory(object): __name__ = 'VMWareInventory' guest_props = False instances = [] debug = False load_dumpfile = None write_dumpfile = None maxlevel = 1 lowerkeys = True config = None cache_max_age = None cache_path_cache = None cache_path_index = None cache_dir = None server = None port = None username = None password = None validate_certs = True host_filters = [] skip_keys = [] groupby_patterns = [] safe_types = [bool, str, float, None] + list(integer_types) iter_types = [dict, list] bad_types = ['Array', 'disabledMethod', 'declaredAlarmState'] vimTableMaxDepth = { "vim.HostSystem": 2, "vim.VirtualMachine": 2, } custom_fields = {} # use jinja environments to allow for custom filters env = Environment() env.filters['regex_match'] = regex_match env.filters['select_chain_match'] = select_chain_match # translation table for attributes to fetch for known vim types vimTable = { vim.Datastore: ['_moId', 'name'], vim.ResourcePool: ['_moId', 'name'], vim.HostSystem: ['_moId', 'name'], } @staticmethod def _empty_inventory(): return {"_meta": {"hostvars": {}}} def __init__(self, load=True):
self.inventory = VMWareInventory._empty_inventory() if load: # Read settings and parse CLI arguments self.parse_cli_args() self.read_settings() # Check the cache cache_valid = self.is_cache_valid() # Handle Cache if self.args.refresh_cache or not cache_valid: self.do_api_calls_update_cache() else: self.debugl('loading inventory from cache') self.inventory = self.get_inventory_from_cache() def debugl(self, text): if self.args.debug: try: text = str(text) except UnicodeEncodeError: text = text.encode('ascii', 'ignore') print('%s %s' % (datetime.datetime.now(), text)) def show(self): # Data to print self.debugl('dumping results') data_to_print = None if self.args.host: data_to_print = self.get_host_info(self.args.host) elif self.args.list: # Display list of instances for inventory data_to_print = self.inventory return json.dumps(data_to_print, indent=2) def is_cache_valid(self): ''' Determines if the cache files have expired, or if it is still valid ''' valid = False if os.path.isfile(self.cache_path_cache): mod_time = os.path.getmtime(self.cache_path_cache) current_time = time() if (mod_time + self.cache_max_age) > current_time: valid = True return valid def do_api_calls_update_cache(self): ''' Get instances and cache the data ''' self.inventory = self.instances_to_inventory(self.get_instances()) self.write_to_cache(self.inventory) def write_to_cache(self, data): ''' Dump inventory to json file ''' with open(self.cache_path_cache, 'wb') as f: f.write(json.dumps(data)) def get_inventory_from_cache(self): ''' Read in jsonified inventory ''' jdata = None with open(self.cache_path_cache, 'rb') as f: jdata = f.read() return json.loads(jdata) def read_settings(self): ''' Reads the settings from the vmware_inventory.ini file ''' scriptbasename = __file__ scriptbasename = os.path.basename(scriptbasename) scriptbasename = scriptbasename.replace('.py', '') defaults = {'vmware': { 'server': '', 'port': 443, 'username': '', 'password': '', 'validate_certs': True, 'ini_path': os.path.join(os.path.dirname(__file__), '%s.ini' % scriptbasename), 'cache_name': 'ansible-vmware', 'cache_path': '~/.ansible/tmp', 'cache_max_age': 3600, 'max_object_level': 1, 'skip_keys': 'declaredalarmstate,' 'disabledmethod,' 'dynamicproperty,' 'dynamictype,' 'environmentbrowser,' 'managedby,' 'parent,' 'childtype,' 'resourceconfig', 'alias_pattern': '{{ config.name + "_" + config.uuid }}', 'host_pattern': '{{ guest.ipaddress }}', 'host_filters': '{{ runtime.powerstate == "poweredOn" }}', 'groupby_patterns': '{{ guest.guestid }},{{ "templates" if config.template else "guests"}}', 'lower_var_keys': True, 'custom_field_group_prefix': 'vmware_tag_', 'groupby_custom_field': False} } if six.PY3: config = configparser.ConfigParser() else: config = configparser.SafeConfigParser() # where is the config? vmware_ini_path = os.environ.get('VMWARE_INI_PATH', defaults['vmware']['ini_path']) vmware_ini_path = os.path.expanduser(os.path.expandvars(vmware_ini_path)) config.read(vmware_ini_path) if 'vmware' not in config.sections(): config.add_section('vmware') # apply defaults for k, v in defaults['vmware'].items(): if not config.has_option('vmware', k): config.set('vmware', k, str(v)) # where is the cache? 
self.cache_dir = os.path.expanduser(config.get('vmware', 'cache_path')) if self.cache_dir and not os.path.exists(self.cache_dir): os.makedirs(self.cache_dir) # set the cache filename and max age cache_name = config.get('vmware', 'cache_name') self.cache_path_cache = self.cache_dir + "/%s.cache" % cache_name self.debugl('cache path is %s' % self.cache_path_cache) self.cache_max_age = int(config.getint('vmware', 'cache_max_age')) # mark the connection info self.server = os.environ.get('VMWARE_SERVER', config.get('vmware', 'server')) self.debugl('server is %s' % self.server) self.port = int(os.environ.get('VMWARE_PORT', config.get('vmware', 'port'))) self.username = os.environ.get('VMWARE_USERNAME', config.get('vmware', 'username')) self.debugl('username is %s' % self.username) self.password = os.environ.get('VMWARE_PASSWORD', config.get('vmware', 'password', raw=True)) self.validate_certs = os.environ.get('VMWARE_VALIDATE_CERTS', config.get('vmware', 'validate_certs')) if self.validate_certs in ['no', 'false', 'False', False]: self.validate_certs = False self.debugl('cert validation is %s' % self.validate_certs) # behavior control self.maxlevel = int(config.get('vmware', 'max_object_level')) self.debugl('max object level is %s' % self.maxlevel) self.lowerkeys = config.get('vmware', 'lower_var_keys') if type(self.lowerkeys) != bool: if str(self.lowerkeys).lower() in ['yes', 'true', '1']: self.lowerkeys = True else: self.lowerkeys = False self.debugl('lower keys is %s' % self.lowerkeys) self.skip_keys = list(config.get('vmware', 'skip_keys').split(',')) self.debugl('skip keys is %s' % self.skip_keys) self.host_filters = list(config.get('vmware', 'host_filters').split(',')) self.debugl('host filters are %s' % self.host_filters) self.groupby_patterns = list(config.get('vmware', 'groupby_patterns').split(',')) self.debugl('groupby patterns are %s' % self.groupby_patterns) # Special feature to disable the brute force serialization of the # virtulmachine objects. The key name for these properties does not # matter because the values are just items for a larger list. 
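read_settings has to coerce ini strings such as 'yes', 'true', or '1' into real booleans in two separate places above (validate_certs and lower_var_keys). The same logic as a reusable helper (hypothetical name, not part of the script):

def to_bool(value):
    # ConfigParser returns strings; accept the common truthy spellings.
    if isinstance(value, bool):
        return value
    return str(value).strip().lower() in ('yes', 'true', '1')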
if config.has_section('properties'): self.guest_props = [] for prop in config.items('properties'): self.guest_props.append(prop[1]) # save the config self.config = config def parse_cli_args(self): ''' Command line argument processing ''' parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on PyVmomi') parser.add_argument('--debug', action='store_true', default=False, help='show debug info') parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)') parser.add_argument('--host', action='store', help='Get all the variables about a specific instance') parser.add_argument('--refresh-cache', action='store_true', default=False, help='Force refresh of cache by making API requests to VSphere (default: False - use cache files)') parser.add_argument('--max-instances', default=None, type=int, help='maximum number of instances to retrieve') self.args = parser.parse_args() def get_instances(self): ''' Get a list of vm instances with pyvmomi ''' kwargs = {'host': self.server, 'user': self.username, 'pwd': self.password, 'port': int(self.port)} if hasattr(ssl, 'SSLContext') and not self.validate_certs: context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) context.verify_mode = ssl.CERT_NONE kwargs['sslContext'] = context return self._get_instances(kwargs) def _get_instances(self, inkwargs): ''' Make API calls ''' instances = [] try: si = SmartConnect(**inkwargs) except ssl.SSLError as connection_error: if '[SSL: CERTIFICATE_VERIFY_FAILED]' in str(connection_error) and self.validate_certs: sys.exit("Unable to connect to ESXi server due to %s, " "please specify validate_certs=False and try again" % connection_error) except Exception as exc: self.debugl("Unable to connect to ESXi server due to %s" % exc) sys.exit("Unable to connect to ESXi server due to %s" % exc) self.debugl('retrieving all instances') if not si: sys.exit("Could not connect to the specified host using specified " "username and password") atexit.register(Disconnect, si) content = si.RetrieveContent() # Create a search container for virtualmachines self.debugl('creating containerview for virtualmachines') container = content.rootFolder viewType = [vim.VirtualMachine] recursive = True containerView = content.viewManager.CreateContainerView(container, viewType, recursive) children = containerView.view for child in children: # If requested, limit the total number of instances if self.args.max_instances: if len(instances) >= self.args.max_instances: break instances.append(child) self.debugl("%s total instances in container view" % len(instances)) if self.args.host: instances = [x for x in instances if x.name == self.args.host] instance_tuples = [] for instance in sorted(instances): if self.guest_props: ifacts = self.facts_from_proplist(instance) else: ifacts = self.facts_from_vobj(instance) instance_tuples.append((instance, ifacts)) self.debugl('facts collected for all instances') try: cfm = content.customFieldsManager if cfm is not None and cfm.field: for f in cfm.field: if f.managedObjectType == vim.VirtualMachine: self.custom_fields[f.key] = f.name self.debugl('%d custom fields collected' % len(self.custom_fields)) except vmodl.RuntimeFault as exc: self.debugl("Unable to gather custom fields due to %s" % exc.msg) except IndexError as exc: self.debugl("Unable to gather custom fields due to %s" % exc) return instance_tuples def instances_to_inventory(self, instances): ''' Convert a list of vm objects into a json compliant inventory ''' self.debugl('re-indexing instances 
based on ini settings') inventory = VMWareInventory._empty_inventory() inventory['all'] = {} inventory['all']['hosts'] = [] for idx, instance in enumerate(instances): # make a unique id for this object to avoid vmware's # numerous uuid's which aren't all unique. thisid = str(uuid.uuid4()) idata = instance[1] # Put it in the inventory inventory['all']['hosts'].append(thisid) inventory['_meta']['hostvars'][thisid] = idata.copy() inventory['_meta']['hostvars'][thisid]['ansible_uuid'] = thisid # Make a map of the uuid to the alias the user wants name_mapping = self.create_template_mapping( inventory, self.config.get('vmware', 'alias_pattern') ) # Make a map of the uuid to the ssh hostname the user wants host_mapping = self.create_template_mapping( inventory, self.config.get('vmware', 'host_pattern') ) # Reset the inventory keys for k, v in name_mapping.items(): if not host_mapping or k not in host_mapping: continue # set ansible_host (2.x) try: inventory['_meta']['hostvars'][k]['ansible_host'] = host_mapping[k] # 1.9.x backwards compliance inventory['_meta']['hostvars'][k]['ansible_ssh_host'] = host_mapping[k] except Exception: continue if k == v: continue # add new key inventory['all']['hosts'].append(v) inventory['_meta']['hostvars'][v] = inventory['_meta']['hostvars'][k] # cleanup old key inventory['all']['hosts'].remove(k) inventory['_meta']['hostvars'].pop(k, None) self.debugl('pre-filtered hosts:') for i in inventory['all']['hosts']: self.debugl(' * %s' % i) # Apply host filters for hf in self.host_filters: if not hf: continue self.debugl('filter: %s' % hf) filter_map = self.create_template_mapping(inventory, hf, dtype='boolean') for k, v in filter_map.items(): if not v: # delete this host inventory['all']['hosts'].remove(k) inventory['_meta']['hostvars'].pop(k, None) self.debugl('post-filter hosts:') for i in inventory['all']['hosts']: self.debugl(' * %s' % i) # Create groups for gbp in self.groupby_patterns: groupby_map = self.create_template_mapping(inventory, gbp) for k, v in groupby_map.items(): if v not in inventory: inventory[v] = {} inventory[v]['hosts'] = [] if k not in inventory[v]['hosts']: inventory[v]['hosts'].append(k) if self.config.get('vmware', 'groupby_custom_field'): for k, v in inventory['_meta']['hostvars'].items(): if 'customvalue' in v: for tv in v['customvalue']: if not isinstance(tv['value'], string_types): continue newkey = None field_name = self.custom_fields[tv['key']] if tv['key'] in self.custom_fields else tv['key'] values = [] keylist = map(lambda x: x.strip(), tv['value'].split(',')) for kl in keylist: try: newkey = self.config.get('vmware', 'custom_field_group_prefix') + field_name + '_' + kl newkey = newkey.strip() except Exception as e: self.debugl(e) values.append(newkey) for tag in values: if not tag: continue if tag not in inventory: inventory[tag] = {} inventory[tag]['hosts'] = [] if k not in inventory[tag]['hosts']: inventory[tag]['hosts'].append(k) return inventory def create_template_mapping(self, inventory, pattern, dtype='string'): ''' Return a hash of uuid to templated string from pattern ''' mapping = {} for k, v in inventory['_meta']['hostvars'].items(): t = self.env.from_string(pattern) newkey = None try: newkey = t.render(v) newkey = newkey.strip() except Exception as e: self.debugl(e) if not newkey: continue elif dtype == 'integer': newkey = int(newkey) elif dtype == 'boolean': if newkey.lower() == 'false': newkey = False elif newkey.lower() == 'true': newkey = True elif dtype == 'string': pass mapping[k] = newkey return mapping def 
facts_from_proplist(self, vm): '''Get specific properties instead of serializing everything''' rdata = {} for prop in self.guest_props: self.debugl('getting %s property for %s' % (prop, vm.name)) key = prop if self.lowerkeys: key = key.lower() if '.' not in prop: # props without periods are direct attributes of the parent rdata[key] = getattr(vm, prop) else: # props with periods are subkeys of parent attributes parts = prop.split('.') total = len(parts) - 1 # pointer to the current object val = None # pointer to the current result key lastref = rdata for idx, x in enumerate(parts): if isinstance(val, dict): if x in val: val = val.get(x) elif x.lower() in val: val = val.get(x.lower()) else: # if the val wasn't set yet, get it from the parent if not val: try: val = getattr(vm, x) except AttributeError as e: self.debugl(e) else: # in a subkey, get the subprop from the previous attrib try: val = getattr(val, x) except AttributeError as e: self.debugl(e) # make sure it serializes val = self._process_object_types(val) # lowercase keys if requested if self.lowerkeys: x = x.lower() # change the pointer or set the final value if idx != total: if x not in lastref: lastref[x] = {} lastref = lastref[x] else: lastref[x] = val return rdata def facts_from_vobj(self, vobj, level=0): ''' Traverse a VM object and return a json compliant data structure ''' # pyvmomi objects are not yet serializable, but may be one day ... # https://github.com/vmware/pyvmomi/issues/21 # WARNING: # Accessing an object attribute will trigger a SOAP call to the remote. # Increasing the attributes collected or the depth of recursion greatly # increases runtime duration and potentially memory+network utilization. if level == 0: try: self.debugl("get facts for %s" % vobj.name) except Exception as e: self.debugl(e) rdata = {} methods = dir(vobj) methods = [str(x) for x in methods if not x.startswith('_')] methods = [x for x in methods if x not in self.bad_types] methods = [x for x in methods if not x.lower() in self.skip_keys] methods = sorted(methods) for method in methods: # Attempt to get the method, skip on fail try: methodToCall = getattr(vobj, method) except Exception as e: continue # Skip callable methods if callable(methodToCall): continue if self.lowerkeys: method = method.lower() rdata[method] = self._process_object_types( methodToCall, thisvm=vobj, inkey=method, ) return rdata def _process_object_types(self, vobj, thisvm=None, inkey='', level=0): ''' Serialize an object ''' rdata = {} if type(vobj).__name__ in self.vimTableMaxDepth and level >= self.vimTableMaxDepth[type(vobj).__name__]: return rdata if vobj is None: rdata = None elif type(vobj) in self.vimTable: rdata = {} for key in self.vimTable[type(vobj)]: try: rdata[key] = getattr(vobj, key) except Exception as e: self.debugl(e) elif issubclass(type(vobj), str) or isinstance(vobj, str): if vobj.isalnum(): rdata = vobj else: rdata = vobj.decode('ascii', 'ignore') elif issubclass(type(vobj), bool) or isinstance(vobj, bool): rdata = vobj elif issubclass(type(vobj), integer_types) or isinstance(vobj, integer_types): rdata = vobj elif issubclass(type(vobj), float) or isinstance(vobj, float): rdata = vobj elif issubclass(type(vobj), list) or issubclass(type(vobj), tuple): rdata = [] try: vobj = sorted(vobj) except Exception: pass for idv, vii in enumerate(vobj): if level + 1 <= self.maxlevel: vid = self._process_object_types( vii, thisvm=thisvm, inkey=inkey + '[' + str(idv) + ']', level=(level + 1) ) if vid: rdata.append(vid) elif issubclass(type(vobj), dict): pass elif 
issubclass(type(vobj), object): methods = dir(vobj) methods = [str(x) for x in methods if not x.startswith('_')] methods = [x for x in methods if x not in self.bad_types] methods = [x for x in methods if not inkey + '.' + x.lower() in self.skip_keys] methods = sorted(methods) for method in methods: # Attempt to get the method, skip on fail try: methodToCall = getattr(vobj, method) except Exception as e: continue if callable(methodToCall): continue if self.lowerkeys: method = method.lower() if level + 1 <= self.maxlevel: try: rdata[method] = self._process_object_types( methodToCall, thisvm=thisvm, inkey=inkey + '.' + method, level=(level + 1) ) except vim.fault.NoPermission: self.debugl("Skipping method %s (NoPermission)" % method) else: pass return rdata def get_host_info(self, host): ''' Return hostvars for a single host ''' if host in self.inventory['_meta']['hostvars']: return self.inventory['_meta']['hostvars'][host] elif self.args.host and self.inventory['_meta']['hostvars']: match = None for k, v in self.inventory['_meta']['hostvars'].items(): if self.inventory['_meta']['hostvars'][k]['name'] == self.args.host: match = k break if match: return self.inventory['_meta']['hostvars'][match] else: raise VMwareMissingHostException('%s not found' % host) else: raise VMwareMissingHostException('%s not found' % host) if __name__ == "__main__": # Run the script print(VMWareInventory().show()) ansible-2.5.1/contrib/inventory/windows_azure.ini0000644000000000000000000000154113265756155022210 0ustar rootroot00000000000000# Ansible Windows Azure external inventory script settings # [azure] # The module needs your Windows Azure subscription ID and Management certificate path. # These may also be specified on the command line via --subscription-id and --cert-path # or via the environment variables AZURE_SUBSCRIPTION_ID and AZURE_CERT_PATH # #subscription_id = aaaaaaaa-1234-1234-1234-aaaaaaaaaaaa #cert_path = /path/to/cert.pem # API calls to Windows Azure may be slow. For this reason, we cache the results # of an API call. Set this to the path you want cache files to be written to. # Two files will be written to this directory: # - ansible-azure.cache # - ansible-azure.index # cache_path = /tmp # The number of seconds a cache file is considered valid. After this many # seconds, a new API call will be made, and the cache file will be updated. # cache_max_age = 300 ansible-2.5.1/contrib/inventory/windows_azure.py0000755000000000000000000002566313265756155022077 0ustar rootroot00000000000000#!/usr/bin/env python ''' Windows Azure external inventory script ======================================= Generates inventory that Ansible can understand by making API request to Windows Azure using the azure python library. NOTE: This script assumes Ansible is being executed where azure is already installed. pip install azure Adapted from the ansible Linode plugin by Dan Slimmon. ''' # (c) 2013, John Whitbeck # # This file is part of Ansible, # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. 
If not, see . ###################################################################### # Standard imports import re import sys import argparse import os from ansible.module_utils.six.moves.urllib.parse import urlparse from time import time try: import json except ImportError: import simplejson as json try: from azure.servicemanagement import ServiceManagementService except ImportError as e: sys.exit("ImportError: {0}".format(str(e))) # Imports for ansible import ConfigParser class AzureInventory(object): def __init__(self): """Main execution path.""" # Inventory grouped by display group self.inventory = {} # Index of deployment name -> host self.index = {} self.host_metadata = {} # Cache setting defaults. # These can be overridden in settings (see `read_settings`). cache_dir = os.path.expanduser('~') self.cache_path_cache = os.path.join(cache_dir, '.ansible-azure.cache') self.cache_path_index = os.path.join(cache_dir, '.ansible-azure.index') self.cache_max_age = 0 # Read settings and parse CLI arguments self.read_settings() self.read_environment() self.parse_cli_args() # Initialize Azure ServiceManagementService self.sms = ServiceManagementService(self.subscription_id, self.cert_path) # Cache if self.args.refresh_cache: self.do_api_calls_update_cache() elif not self.is_cache_valid(): self.do_api_calls_update_cache() if self.args.list_images: data_to_print = self.json_format_dict(self.get_images(), True) elif self.args.list or self.args.host: # Display list of nodes for inventory if len(self.inventory) == 0: data = json.loads(self.get_inventory_from_cache()) else: data = self.inventory if self.args.host: data_to_print = self.get_host(self.args.host) else: # Add the `['_meta']['hostvars']` information. hostvars = {} if len(data) > 0: for host in set([h for hosts in data.values() for h in hosts if h]): hostvars[host] = self.get_host(host, jsonify=False) data['_meta'] = {'hostvars': hostvars} # JSONify the data. data_to_print = self.json_format_dict(data, pretty=True) print(data_to_print) def get_host(self, hostname, jsonify=True): """Return information about the given hostname, based on what the Windows Azure API provides. 
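The __init__ above builds the ['_meta']['hostvars'] section by flattening every group's host list into one de-duplicated set before attaching per-host metadata. The same step in isolation (hypothetical data):

data = {'azure': ['web01', 'db01'], 'eastus': ['web01'], 'empty': []}
unique_hosts = set(h for hosts in data.values() for h in hosts if h)
hostvars = dict((host, {}) for host in unique_hosts)  # the real script calls self.get_host(host) here
data['_meta'] = {'hostvars': hostvars}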
""" if hostname not in self.host_metadata: return "No host found: %s" % json.dumps(self.host_metadata) if jsonify: return json.dumps(self.host_metadata[hostname]) return self.host_metadata[hostname] def get_images(self): images = [] for image in self.sms.list_os_images(): if str(image.label).lower().find(self.args.list_images.lower()) >= 0: images.append(vars(image)) return json.loads(json.dumps(images, default=lambda o: o.__dict__)) def is_cache_valid(self): """Determines if the cache file has expired, or if it is still valid.""" if os.path.isfile(self.cache_path_cache): mod_time = os.path.getmtime(self.cache_path_cache) current_time = time() if (mod_time + self.cache_max_age) > current_time: if os.path.isfile(self.cache_path_index): return True return False def read_settings(self): """Reads the settings from the .ini file.""" config = ConfigParser.SafeConfigParser() config.read(os.path.dirname(os.path.realpath(__file__)) + '/windows_azure.ini') # Credentials related if config.has_option('azure', 'subscription_id'): self.subscription_id = config.get('azure', 'subscription_id') if config.has_option('azure', 'cert_path'): self.cert_path = config.get('azure', 'cert_path') # Cache related if config.has_option('azure', 'cache_path'): cache_path = os.path.expandvars(os.path.expanduser(config.get('azure', 'cache_path'))) self.cache_path_cache = os.path.join(cache_path, 'ansible-azure.cache') self.cache_path_index = os.path.join(cache_path, 'ansible-azure.index') if config.has_option('azure', 'cache_max_age'): self.cache_max_age = config.getint('azure', 'cache_max_age') def read_environment(self): ''' Reads the settings from environment variables ''' # Credentials if os.getenv("AZURE_SUBSCRIPTION_ID"): self.subscription_id = os.getenv("AZURE_SUBSCRIPTION_ID") if os.getenv("AZURE_CERT_PATH"): self.cert_path = os.getenv("AZURE_CERT_PATH") def parse_cli_args(self): """Command line argument processing""" parser = argparse.ArgumentParser( description='Produce an Ansible Inventory file based on Azure', ) parser.add_argument('--list', action='store_true', default=True, help='List nodes (default: True)') parser.add_argument('--list-images', action='store', help='Get all available images.') parser.add_argument('--refresh-cache', action='store_true', default=False, help='Force refresh of thecache by making API requests to Azure ' '(default: False - use cache files)') parser.add_argument('--host', action='store', help='Get all information about an instance.') self.args = parser.parse_args() def do_api_calls_update_cache(self): """Do API calls, and save data in cache files.""" self.add_cloud_services() self.write_to_cache(self.inventory, self.cache_path_cache) self.write_to_cache(self.index, self.cache_path_index) def add_cloud_services(self): """Makes an Azure API call to get the list of cloud services.""" try: for cloud_service in self.sms.list_hosted_services(): self.add_deployments(cloud_service) except Exception as e: sys.exit("Error: Failed to access cloud services - {0}".format(str(e))) def add_deployments(self, cloud_service): """Makes an Azure API call to get the list of virtual machines associated with a cloud service. 
""" try: for deployment in self.sms.get_hosted_service_properties(cloud_service.service_name, embed_detail=True).deployments.deployments: self.add_deployment(cloud_service, deployment) except Exception as e: sys.exit("Error: Failed to access deployments - {0}".format(str(e))) def add_deployment(self, cloud_service, deployment): """Adds a deployment to the inventory and index""" for role in deployment.role_instance_list.role_instances: try: # Default port 22 unless port found with name 'SSH' port = '22' for ie in role.instance_endpoints.instance_endpoints: if ie.name == 'SSH': port = ie.public_port break except AttributeError as e: pass finally: self.add_instance(role.instance_name, deployment, port, cloud_service, role.instance_status) def add_instance(self, hostname, deployment, ssh_port, cloud_service, status): """Adds an instance to the inventory and index""" dest = urlparse(deployment.url).hostname # Add to index self.index[hostname] = deployment.name self.host_metadata[hostname] = dict(ansible_ssh_host=dest, ansible_ssh_port=int(ssh_port), instance_status=status, private_id=deployment.private_id) # List of all azure deployments self.push(self.inventory, "azure", hostname) # Inventory: Group by service name self.push(self.inventory, self.to_safe(cloud_service.service_name), hostname) if int(ssh_port) == 22: self.push(self.inventory, "Cloud_services", hostname) # Inventory: Group by region self.push(self.inventory, self.to_safe(cloud_service.hosted_service_properties.location), hostname) def push(self, my_dict, key, element): """Pushed an element onto an array that may not have been defined in the dict.""" if key in my_dict: my_dict[key].append(element) else: my_dict[key] = [element] def get_inventory_from_cache(self): """Reads the inventory from the cache file and returns it as a JSON object.""" cache = open(self.cache_path_cache, 'r') json_inventory = cache.read() return json_inventory def load_index_from_cache(self): """Reads the index from the cache file and sets self.index.""" cache = open(self.cache_path_index, 'r') json_index = cache.read() self.index = json.loads(json_index) def write_to_cache(self, data, filename): """Writes data in JSON format to a file.""" json_data = self.json_format_dict(data, True) cache = open(filename, 'w') cache.write(json_data) cache.close() def to_safe(self, word): """Escapes any characters that would be invalid in an ansible group name.""" return re.sub(r"[^A-Za-z0-9\-]", "_", word) def json_format_dict(self, data, pretty=False): """Converts a dict to a JSON object and dumps it as a formatted string.""" if pretty: return json.dumps(data, sort_keys=True, indent=2) else: return json.dumps(data) AzureInventory() ansible-2.5.1/contrib/inventory/zabbix.ini0000644000000000000000000000025113265756155020564 0ustar rootroot00000000000000# Ansible Zabbix external inventory script settings # [zabbix] # Server location server = http://zabbix.example.com/zabbix # Login username = admin password = zabbix ansible-2.5.1/contrib/inventory/zabbix.py0000755000000000000000000001072413265756155020446 0ustar rootroot00000000000000#!/usr/bin/env python # (c) 2013, Greg Buehler # # This file is part of Ansible, # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. 
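The to_safe helper above is the standard trick for mapping arbitrary cloud labels (service names, locations) onto legal Ansible group names; for example:

import re

def to_safe(word):
    # Replace anything outside [A-Za-z0-9-] with an underscore.
    return re.sub(r"[^A-Za-z0-9\-]", "_", word)

print(to_safe('East US 2'))           # -> East_US_2
print(to_safe('my.cloud-service'))    # -> my_cloud-service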
# # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . ###################################################################### """ Zabbix Server external inventory script. ======================================== Returns hosts and hostgroups from Zabbix Server. If you want to run with --limit against a host group with space in the name, use asterisk. For example --limit="Linux*servers". Configuration is read from `zabbix.ini`. Tested with Zabbix Server 2.0.6 and 3.2.3. """ from __future__ import print_function import os import sys import argparse try: import ConfigParser as configparser except ImportError: import configparser try: from zabbix_api import ZabbixAPI except: print("Error: Zabbix API library must be installed: pip install zabbix-api.", file=sys.stderr) sys.exit(1) try: import json except: import simplejson as json class ZabbixInventory(object): def read_settings(self): config = configparser.SafeConfigParser() conf_path = './zabbix.ini' if not os.path.exists(conf_path): conf_path = os.path.dirname(os.path.realpath(__file__)) + '/zabbix.ini' if os.path.exists(conf_path): config.read(conf_path) # server if config.has_option('zabbix', 'server'): self.zabbix_server = config.get('zabbix', 'server') # login if config.has_option('zabbix', 'username'): self.zabbix_username = config.get('zabbix', 'username') if config.has_option('zabbix', 'password'): self.zabbix_password = config.get('zabbix', 'password') def read_cli(self): parser = argparse.ArgumentParser() parser.add_argument('--host') parser.add_argument('--list', action='store_true') self.options = parser.parse_args() def hoststub(self): return { 'hosts': [] } def get_host(self, api, name): data = {'ansible_ssh_host': name} return data def get_list(self, api): hostsData = api.host.get({'output': 'extend', 'selectGroups': 'extend'}) data = {} data[self.defaultgroup] = self.hoststub() for host in hostsData: hostname = host['name'] data[self.defaultgroup]['hosts'].append(hostname) for group in host['groups']: groupname = group['name'] if groupname not in data: data[groupname] = self.hoststub() data[groupname]['hosts'].append(hostname) # Prevents Ansible from calling this script for each server with --host data['_meta'] = {'hostvars': self.meta} return data def __init__(self): self.defaultgroup = 'group_all' self.zabbix_server = None self.zabbix_username = None self.zabbix_password = None self.meta = {} self.read_settings() self.read_cli() if self.zabbix_server and self.zabbix_username: try: api = ZabbixAPI(server=self.zabbix_server) api.login(user=self.zabbix_username, password=self.zabbix_password) except BaseException as e: print("Error: Could not login to Zabbix server. Check your zabbix.ini.", file=sys.stderr) sys.exit(1) if self.options.host: data = self.get_host(api, self.options.host) print(json.dumps(data, indent=2)) elif self.options.list: data = self.get_list(api) print(json.dumps(data, indent=2)) else: print("usage: --list ..OR.. --host ", file=sys.stderr) sys.exit(1) else: print("Error: Configuration of server and credentials are required. 
See zabbix.ini.", file=sys.stderr) sys.exit(1) ZabbixInventory() ansible-2.5.1/contrib/inventory/zone.py0000755000000000000000000000272213265756155020141 0ustar rootroot00000000000000#!/usr/bin/env python # (c) 2015, Dagobert Michelsen # # This file is part of Ansible, # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . from subprocess import Popen, PIPE import sys import json result = {} result['all'] = {} pipe = Popen(['zoneadm', 'list', '-ip'], stdout=PIPE, universal_newlines=True) result['all']['hosts'] = [] for l in pipe.stdout.readlines(): # 1:work:running:/zones/work:3126dc59-9a07-4829-cde9-a816e4c5040e:native:shared s = l.split(':') if s[1] != 'global': result['all']['hosts'].append(s[1]) result['all']['vars'] = {} result['all']['vars']['ansible_connection'] = 'zone' if len(sys.argv) == 2 and sys.argv[1] == '--list': print(json.dumps(result)) elif len(sys.argv) == 3 and sys.argv[1] == '--host': print(json.dumps({'ansible_connection': 'zone'})) else: sys.stderr.write("Need an argument, either --list or --host \n") ansible-2.5.1/contrib/README.md0000644000000000000000000000244513265756155016035 0ustar rootroot00000000000000contrib ------- Files here provide an extension mechanism for Ansible similar to plugins. They are not maintained by the Ansible core team or installed with Ansible. inventory ========= Before 2.4 introduced inventory plugins, inventory scripts were the only way to provide sources that were not built into Ansible. Inventory scripts allow you to store your hosts, groups, and variables in any way you like. Starting with Ansible version 2.4, they are enabled via the 'script' inventory plugin. Examples of use include discovering inventory from EC2 or pulling it from Cobbler. These could also be used to interface with LDAP or the database. `chmod +x` an inventory plugin and either name it `/etc/ansible/hosts` or use `ansible -i /path/to/inventory/script`. You might also need to copy a configuration file with the same name and/or set environment variables. The scripts or configuration files can provide more details. vault ===== If the file passed to `--vault-password-file` has the executable bit set, Ansible will execute it and use the stdout of that execution as 'the secret'. Vault scripts provided here use this facility to retrieve the vault secret from a number of sources. contributions welcome ===================== Send in pull requests to add scripts of your own. The sky is the limit! ansible-2.5.1/docs/0000755000000000000000000000000013265756221014033 5ustar rootroot00000000000000ansible-2.5.1/docs/api/0000755000000000000000000000000013265756221014604 5ustar rootroot00000000000000ansible-2.5.1/docs/api/Makefile0000644000000000000000000001727213265756155016263 0ustar rootroot00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. 
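The zone.py script above derives its host list by splitting the colon-delimited records printed by zoneadm list -ip and skipping the global zone. The field split in isolation, using the sample record from zone.py's own comment:

line = '1:work:running:/zones/work:3126dc59-9a07-4829-cde9-a816e4c5040e:native:shared'
fields = line.split(':')
zone_id, zone_name, state = fields[0], fields[1], fields[2]
print(zone_name, state)   # -> work running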
LOGFILE = sphinx.log FULL_TRACEBACKS = -T CPUS ?= 4 VERBOSITY ?= -v FORCE_REBUILD = -a -E NITPICK ?= SPHINXOPTS = -j $(CPUS) -w $(LOGFILE) $(FULL_TRACEBACKS) $(FORCE_REBUILD) $(NITPICK) $(VERBOSITY) SPHINXBUILD = sphinx-build PAPER = BUILDDIR = _build RSTDIR = rst MODULES_PATH = ../../lib EXCLUDE_PATHS = ../../lib/ansible/modules ../../lib/ansible/utils/module_docs_fragments ../../lib/ansible/module_utils/six DOC_PROJECTS = "Ansible API" # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . # the i18n builder cannot share the environment and doctrees with the others I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . .PHONY: help help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " singlehtml to make a single large HTML file" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " applehelp to make an Apple Help Book" @echo " devhelp to make HTML files and a Devhelp project" @echo " epub to make an epub" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " latexpdf to make LaTeX files and run them through pdflatex" @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" @echo " text to make text files" @echo " man to make manual pages" @echo " texinfo to make Texinfo files" @echo " info to make Texinfo files and run them through makeinfo" @echo " gettext to make PO message catalogs" @echo " changes to make an overview of all changed/added/deprecated items" @echo " xml to make Docutils-native XML files" @echo " pseudoxml to make pseudoxml-XML files for display purposes" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" @echo " coverage to run coverage check of the documentation (if enabled)" .PHONY: clean clean: rm -rf $(BUILDDIR)/* rm -rf $(RSTDIR)/*.rst .PHONY: apidoc apidoc: sphinx-apidoc --module-first --doc-project $(DOC_PROJECT) --force --maxdepth 7 -o $(RSTDIR) $(MODULES_PATH) $(EXCLUDE_PATHS) .PHONY: html html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." .PHONY: docs docs: clean apidoc html .PHONY: webdocs webdocs: clean apidoc html .PHONY: dirhtml dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." .PHONY: singlehtml singlehtml: $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml @echo @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." .PHONY: pickle pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." .PHONY: json json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." .PHONY: htmlhelp htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." 
.PHONY: qthelp qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Ansible.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Ansible.qhc" .PHONY: applehelp applehelp: $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp @echo @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." @echo "N.B. You won't be able to view it unless you put it in" \ "~/Library/Documentation/Help or install it in your application" \ "bundle." .PHONY: devhelp devhelp: $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp @echo @echo "Build finished." @echo "To view the help file:" @echo "# mkdir -p $$HOME/.local/share/devhelp/Ansible" @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Ansible" @echo "# devhelp" .PHONY: epub epub: $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. The epub file is in $(BUILDDIR)/epub." .PHONY: latex latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make' in that directory to run these through (pdf)latex" \ "(use \`make latexpdf' here to do that automatically)." .PHONY: latexpdf latexpdf: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through pdflatex..." $(MAKE) -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." .PHONY: latexpdfja latexpdfja: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through platex and dvipdfmx..." $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." .PHONY: text text: $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text @echo @echo "Build finished. The text files are in $(BUILDDIR)/text." .PHONY: man man: $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo @echo "Build finished. The manual pages are in $(BUILDDIR)/man." .PHONY: texinfo texinfo: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." @echo "Run \`make' in that directory to run these through makeinfo" \ "(use \`make info' here to do that automatically)." .PHONY: info info: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo "Running Texinfo files through makeinfo..." make -C $(BUILDDIR)/texinfo info @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." .PHONY: gettext gettext: $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale @echo @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." .PHONY: changes changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." .PHONY: linkcheck linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." .PHONY: doctest doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." 
.PHONY: coverage coverage: $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage @echo "Testing of coverage in the sources finished, look at the " \ "results in $(BUILDDIR)/coverage/python.txt." .PHONY: xml xml: $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml @echo @echo "Build finished. The XML files are in $(BUILDDIR)/xml." .PHONY: pseudoxml pseudoxml: $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml @echo @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." ansible-2.5.1/docs/api/conf.py0000644000000000000000000002355113265756155016117 0ustar rootroot00000000000000# -*- coding: utf-8 -*- # # Ansible documentation build configuration file, created by # sphinx-quickstart on Fri Jun 3 17:34:17 2016. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.insert(0, os.path.abspath('../bin')) sys.path.insert(0, os.path.abspath('../lib/ansible')) import sphinx_rtd_theme import alabaster # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'sphinx.ext.todo', 'sphinx.ext.viewcode', 'sphinx.ext.graphviz', 'sphinx.ext.inheritance_diagram', 'alabaster', ] # autodoc_default_flags = ['members', 'show-inheritance', 'inherited-members', 'undoc-members', ] autodoc_default_flags = ['members', 'show-inheritance', 'undoc-members', ] autoclass_content = 'both' autodoc_member_order = 'bysource' autodoc_mock_imports = ['xmltodict', 'winrm', 'redis', 'StricRedis'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Ansible' copyright = u'2016, Red Hat' author = u'Red Hat' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = u'2.3' # The full version, including alpha/beta/rc tags. release = u'1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. 
# today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = True # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'alabaster' # html_theme_path = ['../docsite/_themes'] # html_theme = 'srtd' html_short_title = 'Ansible Documentation' # html_theme = "sphinx_rtd_theme" # html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] html_theme_path = [alabaster.get_path()] html_theme = 'alabaster' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (relative to this directory) to use as a favicon of # the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter.
# html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Language to be used for generating the HTML full-text search index. # Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' # html_search_language = 'en' # A dictionary with options for the search language support, empty by default. # Now only 'ja' uses this config value # html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. # html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. htmlhelp_basename = 'Ansibledoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # 'preamble': '', # Latex figure (float) alignment # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'Ansible.tex', u'Ansible Documentation', u'Red Hat', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'ansible', u'Ansible Documentation', [author], 1) ] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'Ansible', u'Ansible Documentation', author, 'Ansible', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False ansible-2.5.1/docs/api/docs-requirements.txt0000644000000000000000000000014213265756155021021 0ustar rootroot00000000000000sphinx # extensions sphinxcontrib-napoleon sphinxcontrib-inheritance sphinx-rtd-theme alabaster ansible-2.5.1/docs/api/index.rst0000644000000000000000000000065213265756155016456 0ustar rootroot00000000000000.. ansible documentation master file, created by sphinx-quickstart on Tue Jun 14 10:43:24 2016. You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. Welcome to ansible's documentation! =================================== Contents: .. toctree:: :maxdepth: 2 Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` ansible-2.5.1/docs/api/modules.rst0000644000000000000000000000010213265756155017005 0ustar rootroot00000000000000Ansible api =========== .. toctree:: :maxdepth: 7 ansible ansible-2.5.1/docs/bin/0000755000000000000000000000000013265756221014603 5ustar rootroot00000000000000ansible-2.5.1/docs/bin/dump_config.py0000755000000000000000000000431113265756155017457 0ustar rootroot00000000000000#!/usr/bin/env python import optparse import os import sys import yaml from jinja2 import Environment, FileSystemLoader DEFAULT_TEMPLATE_FILE = 'config.rst.j2' def generate_parser(): p = optparse.OptionParser( version='%prog 1.0', usage='usage: %prog [options]', description='Generate configuration documentation from config metadata', ) p.add_option("-t", "--template-file", action="store", dest="template_file", default=DEFAULT_TEMPLATE_FILE, help="path to the Jinja2 template file") p.add_option("-o", "--output-dir", action="store", dest="output_dir", default='/tmp/', help="Output directory for rst files") p.add_option("-d", "--docs-source", action="store", dest="docs", default=None, help="Source for attribute docs") return p def fix_description(config_options): '''some descriptions are strings, some are lists.
workaround it...''' for config_key in config_options: description = config_options[config_key].get('description', []) if isinstance(description, list): desc_list = description else: desc_list = [description] config_options[config_key]['description'] = desc_list return config_options def main(args): parser = generate_parser() (options, args) = parser.parse_args() output_dir = os.path.abspath(options.output_dir) template_file_full_path = os.path.abspath(options.template_file) template_file = os.path.basename(template_file_full_path) template_dir = os.path.dirname(os.path.abspath(template_file_full_path)) if options.docs: with open(options.docs) as f: docs = yaml.safe_load(f) else: docs = {} config_options = docs config_options = fix_description(config_options) env = Environment(loader=FileSystemLoader(template_dir), trim_blocks=True,) template = env.get_template(template_file) output_name = os.path.join(output_dir, template_file.replace('.j2', '')) temp_vars = {'config_options': config_options} with open(output_name, 'wb') as f: f.write(template.render(temp_vars).encode('utf-8')) return 0 if __name__ == '__main__': sys.exit(main(sys.argv[:])) ansible-2.5.1/docs/bin/dump_keywords.py0000755000000000000000000000527413265756155020072 0ustar rootroot00000000000000#!/usr/bin/env python import optparse import re from distutils.version import LooseVersion import jinja2 import yaml from jinja2 import Environment, FileSystemLoader from ansible.playbook import Play from ansible.playbook.block import Block from ansible.playbook.role import Role from ansible.playbook.task import Task template_file = 'playbooks_keywords.rst.j2' oblist = {} clist = [] class_list = [Play, Role, Block, Task] p = optparse.OptionParser( version='%prog 1.0', usage='usage: %prog [options]', description='Generate playbook keyword documentation from code and descriptions', ) p.add_option("-T", "--template-dir", action="store", dest="template_dir", default="../templates", help="directory containing Jinja2 templates") p.add_option("-o", "--output-dir", action="store", dest="output_dir", default='/tmp/', help="Output directory for rst files") p.add_option("-d", "--docs-source", action="store", dest="docs", default=None, help="Source for attribute docs") (options, args) = p.parse_args() for aclass in class_list: aobj = aclass() name = type(aobj).__name__ if options.docs: with open(options.docs) as f: docs = yaml.safe_load(f) else: docs = {} # build ordered list to loop over and dict with attributes clist.append(name) oblist[name] = dict((x, aobj.__dict__['_attributes'][x]) for x in aobj.__dict__['_attributes'] if 'private' not in x or not x.private) # pick up docs if they exist for a in oblist[name]: if a in docs: oblist[name][a] = docs[a] else: oblist[name][a] = ' UNDOCUMENTED!! ' # loop is really with_ for users if name == 'Task': oblist[name]['with_'] = 'DEPRECATED: use ``loop`` instead, ``with_`` used to be how loops were defined, ' 'it can use any available lookup plugin to generate the item list' # local_action is implicit with action if 'action' in oblist[name]: oblist[name]['local_action'] = 'Same as action but also implies ``delegate_to: localhost``' # remove unusable (used to be private?) 
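# The trailing comma in ('loop_args',) below matters: it makes a one-element
# tuple, whereas iterating over the bare string 'loop_args' would yield its
# individual characters instead of the single attribute name.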
for nouse in ('loop_args',): if nouse in oblist[name]: del oblist[name][nouse] env = Environment(loader=FileSystemLoader(options.template_dir), trim_blocks=True,) template = env.get_template(template_file) outputname = options.output_dir + template_file.replace('.j2', '') tempvars = {'oblist': oblist, 'clist': clist} keyword_page = template.render(tempvars) if LooseVersion(jinja2.__version__) < LooseVersion('2.10'): # jinja2 < 2.10's indent filter indents blank lines. Cleanup keyword_page = re.sub(' +\n', '\n', keyword_page) with open(outputname, 'w') as f: f.write(keyword_page) ansible-2.5.1/docs/bin/generate_man.py0000755000000000000000000002074313265756155017615 0ustar rootroot00000000000000#!/usr/bin/env python import optparse import os import pprint import sys from jinja2 import Environment, FileSystemLoader from ansible.module_utils._text import to_bytes def generate_parser(): p = optparse.OptionParser( version='%prog 1.0', usage='usage: %prog [options]', description='Generate cli documentation from cli docstrings', ) p.add_option("-t", "--template-file", action="store", dest="template_file", default="../templates/man.j2", help="path to jinja2 template") p.add_option("-o", "--output-dir", action="store", dest="output_dir", default='/tmp/', help="Output directory for rst files") p.add_option("-f", "--output-format", action="store", dest="output_format", default='man', help="Output format for docs (the default 'man' or 'rst')") return p # from https://www.python.org/dev/peps/pep-0257/ def trim_docstring(docstring): if not docstring: return '' # Convert tabs to spaces (following the normal Python rules) # and split into a list of lines: lines = docstring.expandtabs().splitlines() # Determine minimum indentation (first line doesn't count): indent = sys.maxsize for line in lines[1:]: stripped = line.lstrip() if stripped: indent = min(indent, len(line) - len(stripped)) # Remove indentation (first line is special): trimmed = [lines[0].strip()] if indent < sys.maxsize: for line in lines[1:]: trimmed.append(line[indent:].rstrip()) # Strip off trailing and leading blank lines: while trimmed and not trimmed[-1]: trimmed.pop() while trimmed and not trimmed[0]: trimmed.pop(0) # Return a single string: return '\n'.join(trimmed) def get_options(optlist): ''' get actual options ''' opts = [] for opt in optlist: res = { 'desc': opt.help, 'options': opt._short_opts + opt._long_opts } if opt.action == 'store': res['arg'] = opt.dest.upper() opts.append(res) return opts def get_option_groups(option_parser): groups = [] for option_group in option_parser.option_groups: group_info = {} group_info['desc'] = option_group.get_description() group_info['options'] = option_group.option_list group_info['group_obj'] = option_group groups.append(group_info) return groups def opt_doc_list(cli): ''' iterate over options lists ''' results = [] for option_group in cli.parser.option_groups: results.extend(get_options(option_group.option_list)) results.extend(get_options(cli.parser.option_list)) return results # def opts_docs(cli, name): def opts_docs(cli_class_name, cli_module_name): ''' generate doc structure from options ''' cli_name = 'ansible-%s' % cli_module_name if cli_module_name == 'adhoc': cli_name = 'ansible' # With no action/subcommand # shared opts set # instantiate each cli and ask its options cli_klass = getattr(__import__("ansible.cli.%s" % cli_module_name, fromlist=[cli_class_name]), cli_class_name) cli = cli_klass([]) # parse the common options try: cli.parse() except: pass # base/common cli info docs = {
'cli': cli_module_name, 'cli_name': cli_name, 'usage': cli.parser.usage, 'short_desc': cli.parser.description, 'long_desc': trim_docstring(cli.__doc__), 'actions': {}, } option_info = {'option_names': [], 'options': [], 'groups': []} for extras in ('ARGUMENTS',): if hasattr(cli, extras): docs[extras.lower()] = getattr(cli, extras) common_opts = opt_doc_list(cli) groups_info = get_option_groups(cli.parser) shared_opt_names = [] for opt in common_opts: shared_opt_names.extend(opt.get('options', [])) option_info['options'] = common_opts option_info['option_names'] = shared_opt_names option_info['groups'].extend(groups_info) docs.update(option_info) # now for each action/subcommand # force populate parser with per action options # use class attrs not the attrs on an instance (not that it matters here...) for action in getattr(cli_klass, 'VALID_ACTIONS', ()): # instantiate each cli and ask its options action_cli_klass = getattr(__import__("ansible.cli.%s" % cli_module_name, fromlist=[cli_class_name]), cli_class_name) # init with args with action added? cli = action_cli_klass([]) cli.args.append(action) try: cli.parse() except: pass # FIXME/TODO: needed? # avoid dupe errors cli.parser.set_conflict_handler('resolve') cli.set_action() action_info = {'option_names': [], 'options': []} # docs['actions'][action] = {} # docs['actions'][action]['name'] = action action_info['name'] = action action_info['desc'] = trim_docstring(getattr(cli, 'execute_%s' % action).__doc__) # docs['actions'][action]['desc'] = getattr(cli, 'execute_%s' % action).__doc__.strip() action_doc_list = opt_doc_list(cli) uncommon_options = [] for action_doc in action_doc_list: # uncommon_options = [] option_aliases = action_doc.get('options', []) for option_alias in option_aliases: if option_alias in shared_opt_names: continue # TODO: use set if option_alias not in action_info['option_names']: action_info['option_names'].append(option_alias) if action_doc in action_info['options']: continue uncommon_options.append(action_doc) action_info['options'] = uncommon_options docs['actions'][action] = action_info docs['options'] = opt_doc_list(cli) return docs if __name__ == '__main__': parser = generate_parser() options, args = parser.parse_args() template_file = options.template_file template_path = os.path.expanduser(template_file) template_dir = os.path.abspath(os.path.dirname(template_path)) template_basename = os.path.basename(template_file) output_dir = os.path.abspath(options.output_dir) output_format = options.output_format cli_modules = args # various cli parsing things check sys.argv if the 'args' that are passed in are [] # so just remove any args so the cli modules don't try to parse them resulting in warnings sys.argv = [sys.argv[0]] # need to be in right dir os.chdir(os.path.dirname(__file__)) allvars = {} output = {} cli_list = [] cli_bin_name_list = [] # for binary in os.listdir('../../lib/ansible/cli'): for cli_module_name in cli_modules: binary = os.path.basename(os.path.expanduser(cli_module_name)) if not binary.endswith('.py'): continue elif binary == '__init__.py': continue cli_name = os.path.splitext(binary)[0] if cli_name == 'adhoc': cli_class_name = 'AdHocCLI' # myclass = 'AdHocCLI' output[cli_name] = 'ansible.1.asciidoc.in' cli_bin_name = 'ansible' else: # myclass = "%sCLI" % libname.capitalize() cli_class_name = "%sCLI" % cli_name.capitalize() output[cli_name] = 'ansible-%s.1.asciidoc.in' % cli_name cli_bin_name = 'ansible-%s' % cli_name # FIXME: allvars[cli_name] = opts_docs(cli_class_name, cli_name)
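# For reference, allvars[cli_name] now holds the structure assembled by
# opts_docs() above, roughly of this shape (values are illustrative, keys are
# the ones populated in the code):
#   {'cli': 'config', 'cli_name': 'ansible-config', 'usage': '...',
#    'short_desc': '...', 'long_desc': '...',
#    'options': [{'desc': '...', 'options': ['-v', '--verbose']}, ...],
#    'option_names': ['-v', '--verbose', ...], 'groups': [...],
#    'actions': {'dump': {'name': 'dump', 'desc': '...',
#                         'option_names': [...], 'options': [...]}, ...}}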
cli_bin_name_list.append(cli_bin_name) cli_list = allvars.keys() doc_name_formats = {'man': '%s.1.asciidoc.in', 'rst': '%s.rst'} for cli_name in cli_list: # template it! env = Environment(loader=FileSystemLoader(template_dir)) template = env.get_template(template_basename) # add rest to vars tvars = allvars[cli_name] tvars['cli_list'] = cli_list tvars['cli_bin_name_list'] = cli_bin_name_list tvars['cli'] = cli_name if '-i' in tvars['option_names']: print('uses inventory') manpage = template.render(tvars) filename = os.path.join(output_dir, doc_name_formats[output_format] % tvars['cli_name']) with open(filename, 'wb') as f: f.write(to_bytes(manpage)) print("Wrote doc to %s" % filename) ansible-2.5.1/docs/bin/plugin_formatter.py0000755000000000000000000006476513265756155020561 0ustar rootroot00000000000000#!/usr/bin/env python # (c) 2012, Jan-Piet Mens # (c) 2012-2014, Michael DeHaan and others # (c) 2017 Ansible Project # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from __future__ import absolute_import, division, print_function __metaclass__ = type import datetime import glob import optparse import os import re import sys import warnings from collections import defaultdict from distutils.version import LooseVersion from pprint import PrettyPrinter try: from html import escape as html_escape except ImportError: # Python 2 fallback; html.escape() was added in Python 3.2 import cgi def html_escape(text, quote=True): return cgi.escape(text, quote) import jinja2 import yaml from jinja2 import Environment, FileSystemLoader from six import iteritems, string_types from ansible.errors import AnsibleError from ansible.module_utils._text import to_bytes, to_text from ansible.plugins.loader import fragment_loader from ansible.utils import plugin_docs from ansible.utils.display import Display ##################################################################################### # constants and paths # if a module is added in a version of Ansible older than this, don't print the version added information # in the module documentation because everyone is assumed to be running something newer than this already.
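# For example, with the cutoff below set to 1.3, a module documented with
# version_added: 1.2 has its "new in version" note dropped from the generated
# page, while one added in 2.4 keeps it (see too_old() and its callers in
# process_plugins() further down).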
TOO_OLD_TO_BE_NOTABLE = 1.3 # Get parent directory of the directory this script lives in MODULEDIR = os.path.abspath(os.path.join( os.path.dirname(os.path.realpath(__file__)), os.pardir, 'lib', 'ansible', 'modules' )) # The name of the DOCUMENTATION template EXAMPLE_YAML = os.path.abspath(os.path.join( os.path.dirname(os.path.realpath(__file__)), os.pardir, 'examples', 'DOCUMENTATION.yml' )) _ITALIC = re.compile(r"I\(([^)]+)\)") _BOLD = re.compile(r"B\(([^)]+)\)") _MODULE = re.compile(r"M\(([^)]+)\)") _URL = re.compile(r"U\(([^)]+)\)") _LINK = re.compile(r"L\(([^)]+),([^)]+)\)") _CONST = re.compile(r"C\(([^)]+)\)") _RULER = re.compile(r"HORIZONTALLINE") DEPRECATED = b" (D)" pp = PrettyPrinter() display = Display() def rst_ify(text): ''' convert symbols like I(this is in italics) to valid restructured text ''' try: t = _ITALIC.sub(r"*\1*", text) t = _BOLD.sub(r"**\1**", t) t = _MODULE.sub(r":ref:`\1 <\1>`", t) t = _LINK.sub(r"`\1 <\2>`_", t) t = _URL.sub(r"\1", t) t = _CONST.sub(r"`\1`", t) t = _RULER.sub(r"------------", t) except Exception as e: raise AnsibleError("Could not process (%s) : %s" % (text, e)) return t def html_ify(text): ''' convert symbols like I(this is in italics) to valid HTML ''' if not isinstance(text, string_types): text = to_text(text) t = html_escape(text) t = _ITALIC.sub(r"<em>\1</em>", t) t = _BOLD.sub(r"<b>\1</b>", t) t = _MODULE.sub(r"<span class='module'>\1</span>", t) t = _URL.sub(r"<a href='\1'>\1</a>", t) t = _LINK.sub(r"<a href='\2'>\1</a>", t) t = _CONST.sub(r"<code>\1</code>", t) t = _RULER.sub(r"<hr/>", t) return t def rst_fmt(text, fmt): ''' helper for Jinja2 to do format strings ''' return fmt % (text) def rst_xline(width, char="="): ''' return a restructured text line of a given length ''' return char * width def write_data(text, output_dir, outputname, module=None): ''' dumps module output to a file or the screen, as requested ''' if output_dir is not None: if module: outputname = outputname % module if not os.path.exists(output_dir): os.makedirs(output_dir) fname = os.path.join(output_dir, outputname) fname = fname.replace(".py", "") with open(fname, 'wb') as f: f.write(to_bytes(text)) else: print(text) def get_plugin_info(module_dir, limit_to=None, verbose=False): ''' Returns information about plugins and the categories that they belong to :arg module_dir: file system path to the top of the plugin directory :kwarg limit_to: If given, this is a list of plugin names to generate information for. All other plugins will be ignored. :returns: Tuple of two dicts, module_info and categories (alias and deprecation information is recorded inside module_info): :module_info: mapping of module names to information about them. The fields of the dict are: :path: filesystem path to the module :deprecated: boolean. True means the module is deprecated otherwise not. :aliases: set of aliases to this module name :metadata: The module's metadata (as recorded in the module) :doc: The documentation structure for the module :examples: The module's examples :returndocs: The module's returndocs :categories: maps category names to a dict. The dict contains at least one key, '_modules' which contains a list of module names in that category. Any other keys in the dict are subcategories with the same structure. ''' categories = dict() module_info = defaultdict(dict) # * windows powershell modules have documentation stubs in python docstring # format (they are not executed) so skip the ps1 format files # * One glob level for every module level that we're going to traverse files = ( glob.glob("%s/*.py" % module_dir) + glob.glob("%s/*/*.py" % module_dir) + glob.glob("%s/*/*/*.py" % module_dir) + glob.glob("%s/*/*/*/*.py" % module_dir) ) for module_path in files: # Do not list __init__.py files if module_path.endswith('__init__.py'): continue # Do not list blacklisted modules module = os.path.splitext(os.path.basename(module_path))[0] if module in plugin_docs.BLACKLIST['MODULE'] or module == 'base': continue # If requested, limit module documentation building only to passed-in # modules.
if limit_to is not None and module.lower() not in limit_to: continue deprecated = False if module.startswith("_"): if os.path.islink(module_path): # Handle aliases source = os.path.splitext(os.path.basename(os.path.realpath(module_path)))[0] module = module.replace("_", "", 1) aliases = module_info[source].get('aliases', set()) aliases.add(module) # In case we just created this via get()'s fallback module_info[source]['aliases'] = aliases continue else: # Handle deprecations module = module.replace("_", "", 1) deprecated = True # # Regular module to process # category = categories # Start at the second directory because we don't want the "vendor" mod_path_only = os.path.dirname(module_path[len(module_dir):]) module_categories = [] # build up the categories that this module belongs to for new_cat in mod_path_only.split('/')[1:]: if new_cat not in category: category[new_cat] = dict() category[new_cat]['_modules'] = [] module_categories.append(new_cat) category = category[new_cat] category['_modules'].append(module) # the category we will use in links (so list_of_all_plugins can point to plugins/action_plugins/*) primary_category = module_categories[0] if module_categories else '' # use ansible core library to parse out doc metadata YAML and plaintext examples doc, examples, returndocs, metadata = plugin_docs.get_docstring(module_path, fragment_loader, verbose=verbose) # save all the information module_info[module] = {'path': module_path, 'source': os.path.relpath(module_path, module_dir), 'deprecated': deprecated, 'aliases': set(), 'metadata': metadata, 'doc': doc, 'examples': examples, 'returndocs': returndocs, 'categories': module_categories, 'primary_category': primary_category, } # keep module tests out of becoming module docs if 'test' in categories: del categories['test'] return module_info, categories def generate_parser(): ''' generate an optparse parser ''' p = optparse.OptionParser( version='%prog 1.0', usage='usage: %prog [options] arg1 arg2', description='Generate module documentation from metadata', ) p.add_option("-A", "--ansible-version", action="store", dest="ansible_version", default="unknown", help="Ansible version number") p.add_option("-M", "--module-dir", action="store", dest="module_dir", default=MODULEDIR, help="Ansible library path") p.add_option("-P", "--plugin-type", action="store", dest="plugin_type", default='module', help="The type of plugin (module, lookup, etc)") p.add_option("-T", "--template-dir", action="store", dest="template_dir", default="hacking/templates", help="directory containing Jinja2 templates") p.add_option("-t", "--type", action='store', dest='type', choices=['rst'], default='rst', help="Document type") p.add_option("-o", "--output-dir", action="store", dest="output_dir", default=None, help="Output directory for module files") p.add_option("-I", "--includes-file", action="store", dest="includes_file", default=None, help="Create a file containing list of processed modules") p.add_option("-l", "--limit-to-modules", '--limit-to', action="store", dest="limit_to", default=None, help="Limit building module documentation to comma-separated list of plugins.
Specify non-existing plugin name for no plugins.") p.add_option('-V', action='version', help='Show version number and exit') p.add_option('-v', '--verbose', dest='verbosity', default=0, action="count", help="verbose mode (increase number of 'v's for more)") return p def jinja2_environment(template_dir, typ, plugin_type): env = Environment(loader=FileSystemLoader(template_dir), variable_start_string="@{", variable_end_string="}@", trim_blocks=True) env.globals['xline'] = rst_xline templates = {} if typ == 'rst': env.filters['convert_symbols_to_format'] = rst_ify env.filters['html_ify'] = html_ify env.filters['fmt'] = rst_fmt env.filters['xline'] = rst_xline templates['plugin'] = env.get_template('plugin.rst.j2') if plugin_type == 'module': name = 'modules' else: name = 'plugins' templates['category_list'] = env.get_template('%s_by_category.rst.j2' % name) templates['support_list'] = env.get_template('%s_by_support.rst.j2' % name) templates['list_of_CATEGORY_modules'] = env.get_template('list_of_CATEGORY_%s.rst.j2' % name) else: raise Exception("Unsupported format type: %s" % typ) return templates def too_old(added): if not added: return False try: added_tokens = str(added).split(".") readded = added_tokens[0] + "." + added_tokens[1] added_float = float(readded) except ValueError as e: warnings.warn("Could not parse %s: %s" % (added, str(e))) return False return added_float < TOO_OLD_TO_BE_NOTABLE def process_plugins(module_map, templates, outputname, output_dir, ansible_version, plugin_type): for module in module_map: display.display("rendering: %s" % module) fname = module_map[module]['path'] display.vvvvv(pp.pformat(('process_plugins info: ', module_map[module]))) # crash if module is missing documentation and not explicitly hidden from docs index if module_map[module]['doc'] is None: display.error("%s MISSING DOCUMENTATION" % (fname,)) _doc = {plugin_type: module, 'version_added': '2.4', 'filename': fname} module_map[module]['doc'] = _doc # continue # Going to reference this heavily so make a short name to reference it by doc = module_map[module]['doc'] display.vvvvv(pp.pformat(('process_plugins doc: ', doc))) # add some defaults for plugins that dont have most of the info doc['module'] = doc.get('module', module) doc['version_added'] = doc.get('version_added', 'historical') doc['plugin_type'] = plugin_type if module_map[module]['deprecated'] and 'deprecated' not in doc: display.warning("%s PLUGIN MISSING DEPRECATION DOCUMENTATION: %s" % (fname, 'deprecated')) required_fields = ('short_description',) for field in required_fields: if field not in doc: display.warning("%s PLUGIN MISSING field '%s'" % (fname, field)) not_nullable_fields = ('short_description',) for field in not_nullable_fields: if field in doc and doc[field] in (None, ''): print("%s: WARNING: MODULE field '%s' DOCUMENTATION is null/empty value=%s" % (fname, field, doc[field])) if 'version_added' not in doc: display.error("*** ERROR: missing version_added in: %s ***\n" % module) # # The present template gets everything from doc so we spend most of this # function moving data into doc for the template to reference # if module_map[module]['aliases']: doc['aliases'] = module_map[module]['aliases'] # don't show version added information if it's too old to be called out added = 0 if doc['version_added'] == 'historical': del doc['version_added'] else: added = doc['version_added'] # Strip old version_added for the module if too_old(added): del doc['version_added'] option_names = [] if 'options' in doc and doc['options']: for (k, v) 
in iteritems(doc['options']): # Error out if there's no description if 'description' not in doc['options'][k]: raise AnsibleError("Missing required description for option %s in %s " % (k, module)) # Error out if required isn't a boolean (people have been putting # information on when something is required in here. Those need # to go in the description instead). required_value = doc['options'][k].get('required', False) if not isinstance(required_value, bool): raise AnsibleError("Invalid required value '%s' for option '%s' in '%s' (must be truthy)" % (required_value, k, module)) # Strip old version_added information for options if 'version_added' in doc['options'][k] and too_old(doc['options'][k]['version_added']): del doc['options'][k]['version_added'] # Make sure description is a list of lines for later formatting if not isinstance(doc['options'][k]['description'], list): doc['options'][k]['description'] = [doc['options'][k]['description']] option_names.append(k) option_names.sort() doc['option_keys'] = option_names doc['filename'] = fname doc['source'] = module_map[module]['source'] doc['docuri'] = doc['module'].replace('_', '-') doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d') doc['ansible_version'] = ansible_version # check the 'deprecated' field in doc. We expect a dict potentially with 'why', 'version', and 'alternative' fields # examples = module_map[module]['examples'] # print('\n\n%s: type of examples: %s\n' % (module, type(examples))) # if examples and not isinstance(examples, (str, unicode, list)): # raise TypeError('module %s examples is wrong type (%s): %s' % (module, type(examples), examples)) # use 'examples' for 'plainexamples' if 'examples' is a string if isinstance(module_map[module]['examples'], string_types): doc['plainexamples'] = module_map[module]['examples'] # plain text else: doc['plainexamples'] = '' doc['metadata'] = module_map[module]['metadata'] display.vvvvv(pp.pformat(module_map[module])) if module_map[module]['returndocs']: try: doc['returndocs'] = yaml.safe_load(module_map[module]['returndocs']) except Exception as e: print("%s:%s:yaml error:%s:returndocs=%s" % (fname, module, e, module_map[module]['returndocs'])) doc['returndocs'] = None else: doc['returndocs'] = None doc['author'] = doc.get('author', ['UNKNOWN']) if isinstance(doc['author'], string_types): doc['author'] = [doc['author']] display.v('about to template %s' % module) display.vvvvv(pp.pformat(doc)) text = templates['plugin'].render(doc) if LooseVersion(jinja2.__version__) < LooseVersion('2.10'): # jinja2 < 2.10's indent filter indents blank lines. 
Cleanup text = re.sub(' +\n', '\n', text) write_data(text, output_dir, outputname, module) def process_categories(plugin_info, categories, templates, output_dir, output_name, plugin_type): for category in sorted(categories.keys()): module_map = categories[category] category_filename = output_name % category display.display("*** recording category %s in %s ***" % (category, category_filename)) # start a new category file category_name = category.replace("_", " ") category_title = category_name.title() subcategories = dict((k, v) for k, v in module_map.items() if k != '_modules') template_data = {'title': category_title, 'category_name': category_name, 'category': module_map, 'subcategories': subcategories, 'module_info': plugin_info, 'plugin_type': plugin_type } text = templates['list_of_CATEGORY_modules'].render(template_data) write_data(text, output_dir, category_filename) def process_support_levels(plugin_info, templates, output_dir, plugin_type): supported_by = {'Ansible Core Team': {'slug': 'core_supported', 'modules': [], 'output': 'core_maintained.rst', 'blurb': "These are :doc:`modules maintained by the" " Ansible Core Team<core_maintained>` and will always ship" " with Ansible itself."}, 'Ansible Network Team': {'slug': 'network_supported', 'modules': [], 'output': 'network_maintained.rst', 'blurb': "These are :doc:`modules maintained by the" " Ansible Network Team<network_maintained>` in" " a relationship similar to how the Ansible Core Team" " maintains the Core modules."}, 'Ansible Partners': {'slug': 'partner_supported', 'modules': [], 'output': 'partner_maintained.rst', 'blurb': """ Some examples of :doc:`Certified Modules<partner_maintained>` are those submitted by other companies. Maintainers of these types of modules must watch for any issues reported or pull requests raised against the module. The Ansible Core Team will review all modules becoming certified. Core committers will review proposed changes to existing Certified Modules once the community maintainers of the module have approved the changes. Core committers will also ensure that any issues that arise due to Ansible engine changes will be remediated. Also, it is strongly recommended (but not presently required) for these types of modules to have unit tests. These modules are currently shipped with Ansible, but might be shipped separately in the future. """}, 'Ansible Community': {'slug': 'community_supported', 'modules': [], 'output': 'community_maintained.rst', 'blurb': """ These are :doc:`modules maintained by the Ansible Community<community_maintained>`. They **are not** supported by the Ansible Core Team or by companies/partners associated with the module. They are still fully usable, but the response rate to issues is purely up to the community. Best effort support will be provided but is not covered under any support contracts. These modules are currently shipped with Ansible, but will most likely be shipped separately in the future.
"""}, } # only gen support pages for modules for now, need to split and namespace templates and generated docs if plugin_type == 'plugins': return # Separate the modules by support_level for module, info in plugin_info.items(): if not info.get('metadata', None): display.warning('no metadata for %s' % module) continue if info['metadata']['supported_by'] == 'core': supported_by['Ansible Core Team']['modules'].append(module) elif info['metadata']['supported_by'] == 'network': supported_by['Ansible Network Team']['modules'].append(module) elif info['metadata']['supported_by'] == 'certified': supported_by['Ansible Partners']['modules'].append(module) elif info['metadata']['supported_by'] == 'community': supported_by['Ansible Community']['modules'].append(module) else: raise AnsibleError('Unknown supported_by value: %s' % info['metadata']['supported_by']) # Render the module lists for maintainers, data in supported_by.items(): template_data = {'maintainers': maintainers, 'modules': data['modules'], 'slug': data['slug'], 'module_info': plugin_info, 'plugin_type': plugin_type } text = templates['support_list'].render(template_data) write_data(text, output_dir, data['output']) def validate_options(options): ''' validate option parser options ''' if not options.module_dir: sys.exit("--module-dir is required", file=sys.stderr) if not os.path.exists(options.module_dir): sys.exit("--module-dir does not exist: %s" % options.module_dir, file=sys.stderr) if not options.template_dir: sys.exit("--template-dir must be specified") def main(): # INIT p = generate_parser() (options, args) = p.parse_args() validate_options(options) display.verbosity = options.verbosity plugin_type = options.plugin_type # prep templating templates = jinja2_environment(options.template_dir, options.type, plugin_type) # set file/directory structure if plugin_type == 'module': # trim trailing s off of plugin_type for plugin_type=='modules'. ie 'copy_module.rst' outputname = '%s_' + '%s.rst' % plugin_type output_dir = options.output_dir else: # for plugins, just use 'ssh.rst' vs 'ssh_module.rst' outputname = '%s.rst' output_dir = '%s/plugins/%s' % (options.output_dir, plugin_type) display.vv('output name: %s' % outputname) display.vv('output dir: %s' % output_dir) # Convert passed-in limit_to to None or list of modules. 
if options.limit_to is not None: options.limit_to = [s.lower() for s in options.limit_to.split(",")] plugin_info, categories = get_plugin_info(options.module_dir, limit_to=options.limit_to, verbose=(options.verbosity > 0)) categories['all'] = {'_modules': plugin_info.keys()} display.vvv(pp.pformat(categories)) display.vvvvv(pp.pformat(plugin_info)) # Transform the data if options.type == 'rst': display.v('Generating rst') for key, record in plugin_info.items(): display.vv(key) display.vvvvv(pp.pformat(('record', record))) if record.get('doc', None): short_desc = record['doc']['short_description'] if short_desc is None: display.warning('short_description for %s is None' % key) short_desc = '' record['doc']['short_description'] = rst_ify(short_desc) if plugin_type == 'module': display.v('Generating Categories') # Write module master category list category_list_text = templates['category_list'].render(categories=sorted(categories.keys())) category_index_name = '%ss_by_category.rst' % plugin_type write_data(category_list_text, output_dir, category_index_name) # Render all the individual plugin pages display.v('Generating plugin pages') process_plugins(plugin_info, templates, outputname, output_dir, options.ansible_version, plugin_type) # Render all the categories for modules if plugin_type == 'module': display.v('Generating Category lists') category_list_name_template = 'list_of_%s_' + '%ss.rst' % plugin_type process_categories(plugin_info, categories, templates, output_dir, category_list_name_template, plugin_type) # Render the support level pages process_support_levels(plugin_info, templates, output_dir, plugin_type) if __name__ == '__main__': main() ansible-2.5.1/docs/bin/testing_formatter.sh0000755000000000000000000000064113265756155020705 0ustar rootroot00000000000000#!/bin/bash -eu cat <<- EOF > ../docsite/rst/dev_guide/testing/sanity/index.rst Sanity Tests ============ The following sanity tests are available as \`\`--test\`\` options for \`\`ansible-test sanity\`\`. This list is also available using \`\`ansible-test sanity --list-tests\`\`. ..
toctree:: :maxdepth: 1 $(for test in $(../../test/runner/ansible-test sanity --list-tests); do echo " ${test}"; done) EOF ansible-2.5.1/docs/man/0000755000000000000000000000000013265756221014606 5ustar rootroot00000000000000ansible-2.5.1/docs/man/man1/0000755000000000000000000000000013265756221015442 5ustar rootroot00000000000000ansible-2.5.1/docs/man/man1/ansible-config.10000644000000000000000000000632313265756172020415 0ustar rootroot00000000000000'\" t .\" Title: ansible-config .\" Author: [see the "AUTHOR" section] .\" Generator: DocBook XSL Stylesheets v1.78.1 .\" Date: 04/19/2018 .\" Manual: System administration commands .\" Source: Ansible 2.5.1 .\" Language: English .\" .TH "ANSIBLE\-CONFIG" "1" "04/19/2018" "Ansible 2\&.5\&.1" "System administration commands" .\" ----------------------------------------------------------------- .\" * Define some portability stuff .\" ----------------------------------------------------------------- .\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .\" http://bugs.debian.org/507673 .\" http://lists.gnu.org/archive/html/groff/2009-02/msg00013.html .\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .ie \n(.g .ds Aq \(aq .el .ds Aq ' .\" ----------------------------------------------------------------- .\" * set default formatting .\" ----------------------------------------------------------------- .\" disable hyphenation .nh .\" disable justification (adjust text to left margin only) .ad l .\" ----------------------------------------------------------------- .\" * MAIN CONTENT STARTS HERE * .\" ----------------------------------------------------------------- .SH "NAME" ansible-config \- View, edit, and manage ansible configuration\&. .SH "SYNOPSIS" .sp ansible\-config [view|dump|list] [\-\-help] [options] [ansible\&.cfg] .SH "DESCRIPTION" .sp Config command line class .SH "COMMON OPTIONS" .PP \fB\-\-version\fR .RS 4 show program\(cqs version number and exit .RE .PP \fB\-c\fR \fICONFIG_FILE\fR, \fB\-\-config\fR \fICONFIG_FILE\fR .RS 4 path to configuration file, defaults to first file found in precedence\&. .RE .PP \fB\-h\fR, \fB\-\-help\fR .RS 4 show this help message and exit .RE .PP \fB\-v\fR, \fB\-\-verbose\fR .RS 4 verbose mode (\-vvv for more, \-vvvv to enable connection debugging) .RE .SH "ACTIONS" .PP \fBlist\fR .RS 4 list all current configs reading lib/constants\&.py and shows env and config file setting names .RE .PP \fBdump\fR .RS 4 Shows the current settings, merges ansible\&.cfg if specified .PP \fB\-\-only\-changed\fR .RS 4 Only show configurations that have changed from the default .RE .RE .PP \fBview\fR .RS 4 Displays the current config file .RE .SH "ENVIRONMENT" .sp The following environment variables may be specified\&. .sp ANSIBLE_CONFIG \(em Override the default ansible config file .sp Many more are available for most options in ansible\&.cfg .SH "FILES" .sp /etc/ansible/ansible\&.cfg \(em Config file, used if present .sp ~/\&.ansible\&.cfg \(em User config file, overrides the default config if present .SH "AUTHOR" .sp Ansible was originally written by Michael DeHaan\&. .SH "COPYRIGHT" .sp Copyright \(co 2017 Red Hat, Inc | Ansible\&. Ansible is released under the terms of the GPLv3 License\&. 
.SH "SEE ALSO" .sp \fBansible\fR(1), \fBansible\-console\fR(1), \fBansible\-doc\fR(1), \fBansible\-galaxy\fR(1), \fBansible\-inventory\fR(1), \fBansible\-playbook\fR(1), \fBansible\-pull\fR(1), \fBansible\-vault\fR(1) .sp Extensive documentation is available in the documentation site: http://docs\&.ansible\&.com\&. IRC and mailing list info can be found in file CONTRIBUTING\&.md, available in: https://github\&.com/ansible/ansible ansible-2.5.1/docs/man/man1/ansible-config.1.asciidoc.in0000644000000000000000000000376013265756165022573 0ustar rootroot00000000000000ansible-config(1) ================= :doctype: manpage :encoding: utf-8 :lang: en :man source: Ansible :man version: %VERSION% :man manual: System administration commands NAME ---- ansible-config - View, edit, and manage ansible configuration. SYNOPSIS -------- ansible-config [view|dump|list] [--help] [options] [ansible.cfg] DESCRIPTION ----------- Config command line class COMMON OPTIONS -------------- *--version*:: show program's version number and exit *-c* 'CONFIG_FILE', *--config* 'CONFIG_FILE':: path to configuration file, defaults to first file found in precedence. *-h*, *--help*:: show this help message and exit *-v*, *--verbose*:: verbose mode (-vvv for more, -vvvv to enable connection debugging) ACTIONS ------- *list*::: list all current configs reading lib/constants.py and shows env and config file setting names *dump*::: Shows the current settings, merges ansible.cfg if specified *--only-changed*:: Only show configurations that have changed from the default *view*::: Displays the current config file ENVIRONMENT ----------- The following environment variables may be specified. ANSIBLE_CONFIG -- Override the default ansible config file Many more are available for most options in ansible.cfg FILES ----- /etc/ansible/ansible.cfg -- Config file, used if present ~/.ansible.cfg -- User config file, overrides the default config if present AUTHOR ------ Ansible was originally written by Michael DeHaan. COPYRIGHT --------- Copyright © 2017 Red Hat, Inc | Ansible. Ansible is released under the terms of the GPLv3 License. SEE ALSO -------- *ansible*(1), *ansible-console*(1), *ansible-doc*(1), *ansible-galaxy*(1), *ansible-inventory*(1), *ansible-playbook*(1), *ansible-pull*(1), *ansible-vault*(1) Extensive documentation is available in the documentation site: <http://docs.ansible.com>.
IRC and mailing list info can be found in file CONTRIBUTING.md, available in: <https://github.com/ansible/ansible> ansible-2.5.1/docs/man/man1/ansible-console.10000644000000000000000000001466413265756174020613 0ustar rootroot00000000000000'\" t .\" Title: ansible-console .\" Author: [see the "AUTHOR" section] .\" Generator: DocBook XSL Stylesheets v1.78.1 .\" Date: 04/19/2018 .\" Manual: System administration commands .\" Source: Ansible 2.5.1 .\" Language: English .\" .TH "ANSIBLE\-CONSOLE" "1" "04/19/2018" "Ansible 2\&.5\&.1" "System administration commands" .\" ----------------------------------------------------------------- .\" * Define some portability stuff .\" ----------------------------------------------------------------- .\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .\" http://bugs.debian.org/507673 .\" http://lists.gnu.org/archive/html/groff/2009-02/msg00013.html .\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .ie \n(.g .ds Aq \(aq .el .ds Aq ' .\" ----------------------------------------------------------------- .\" * set default formatting .\" ----------------------------------------------------------------- .\" disable hyphenation .nh .\" disable justification (adjust text to left margin only) .ad l .\" ----------------------------------------------------------------- .\" * MAIN CONTENT STARTS HERE * .\" ----------------------------------------------------------------- .SH "NAME" ansible-console \- REPL console for executing Ansible tasks\&. .SH "SYNOPSIS" .sp ansible\-console [<host\-pattern>] [options] .SH "DESCRIPTION" .sp a REPL that allows for running ad\-hoc tasks against a chosen inventory (based on dominis\*(Aq ansible\-shell)\&. .SH "COMMON OPTIONS" .PP \fB\-\-ask\-su\-pass\fR .RS 4 ask for su password (deprecated, use become) .RE .PP \fB\-\-ask\-sudo\-pass\fR .RS 4 ask for sudo password (deprecated, use become) .RE .PP \fB\-\-ask\-vault\-pass\fR .RS 4 ask for vault password .RE .PP \fB\-\-become\-method\fR \fIBECOME_METHOD\fR .RS 4 privilege escalation method to use (default=sudo), valid choices: [ sudo | su | pbrun | pfexec | doas | dzdo | ksu | runas | pmrun | enable ] .RE .PP \fB\-\-become\-user\fR \fIBECOME_USER\fR .RS 4 run operations as this user (default=root) .RE .PP \fB\-\-list\-hosts\fR .RS 4 outputs a list of matching hosts; does not execute anything else .RE .PP \fB\-\-playbook\-dir\fR \fIBASEDIR\fR .RS 4 Since this tool does not use playbooks, use this as a substitute playbook directory\&. This sets the relative path for many features including roles/ group_vars/ etc\&. .RE .PP \fB\-\-private\-key\fR, \fB\-\-key\-file\fR .RS 4 use this file to authenticate the connection .RE .PP \fB\-\-scp\-extra\-args\fR \fISCP_EXTRA_ARGS\fR .RS 4 specify extra arguments to pass to scp only (e\&.g\&. \-l) .RE .PP \fB\-\-sftp\-extra\-args\fR \fISFTP_EXTRA_ARGS\fR .RS 4 specify extra arguments to pass to sftp only (e\&.g\&. \-f, \-l) .RE .PP \fB\-\-ssh\-common\-args\fR \fISSH_COMMON_ARGS\fR .RS 4 specify common arguments to pass to sftp/scp/ssh (e\&.g\&. ProxyCommand) .RE .PP \fB\-\-ssh\-extra\-args\fR \fISSH_EXTRA_ARGS\fR .RS 4 specify extra arguments to pass to ssh only (e\&.g\&.
\-R) .RE .PP \fB\-\-step\fR .RS 4 one\-step\-at\-a\-time: confirm each task before running .RE .PP \fB\-\-syntax\-check\fR .RS 4 perform a syntax check on the playbook, but do not execute it .RE .PP \fB\-\-vault\-id\fR .RS 4 the vault identity to use .RE .PP \fB\-\-vault\-password\-file\fR .RS 4 vault password file .RE .PP \fB\-\-version\fR .RS 4 show program\(cqs version number and exit .RE .PP \fB\-C\fR, \fB\-\-check\fR .RS 4 don\(cqt make any changes; instead, try to predict some of the changes that may occur .RE .PP \fB\-D\fR, \fB\-\-diff\fR .RS 4 when changing (small) files and templates, show the differences in those files; works great with \-\-check .RE .PP \fB\-K\fR, \fB\-\-ask\-become\-pass\fR .RS 4 ask for privilege escalation password .RE .PP \fB\-M\fR, \fB\-\-module\-path\fR .RS 4 prepend colon\-separated path(s) to module library (default=[u\*(Aq/home/jenkins/\&.ansible/plugins/modules\*(Aq, u\*(Aq/usr/share/ansible/plugins/modules\*(Aq]) .RE .PP \fB\-R\fR \fISU_USER\fR, \fB\-\-su\-user\fR \fISU_USER\fR .RS 4 run operations with su as this user (default=None) (deprecated, use become) .RE .PP \fB\-S\fR, \fB\-\-su\fR .RS 4 run operations with su (deprecated, use become) .RE .PP \fB\-T\fR \fITIMEOUT\fR, \fB\-\-timeout\fR \fITIMEOUT\fR .RS 4 override the connection timeout in seconds (default=10) .RE .PP \fB\-U\fR \fISUDO_USER\fR, \fB\-\-sudo\-user\fR \fISUDO_USER\fR .RS 4 desired sudo user (default=root) (deprecated, use become) .RE .PP \fB\-b\fR, \fB\-\-become\fR .RS 4 run operations with become (does not imply password prompting) .RE .PP \fB\-c\fR \fICONNECTION\fR, \fB\-\-connection\fR \fICONNECTION\fR .RS 4 connection type to use (default=smart) .RE .PP \fB\-f\fR \fIFORKS\fR, \fB\-\-forks\fR \fIFORKS\fR .RS 4 specify number of parallel processes to use (default=5) .RE .PP \fB\-h\fR, \fB\-\-help\fR .RS 4 show this help message and exit .RE .PP \fB\-i\fR, \fB\-\-inventory\fR, \fB\-\-inventory\-file\fR .RS 4 specify inventory host path or comma separated host list\&. \-\-inventory\-file is deprecated .RE .PP \fB\-k\fR, \fB\-\-ask\-pass\fR .RS 4 ask for connection password .RE .PP \fB\-l\fR \fISUBSET\fR, \fB\-\-limit\fR \fISUBSET\fR .RS 4 further limit selected hosts to an additional pattern .RE .PP \fB\-s\fR, \fB\-\-sudo\fR .RS 4 run operations with sudo (nopasswd) (deprecated, use become) .RE .PP \fB\-u\fR \fIREMOTE_USER\fR, \fB\-\-user\fR \fIREMOTE_USER\fR .RS 4 connect as this user (default=None) .RE .PP \fB\-v\fR, \fB\-\-verbose\fR .RS 4 verbose mode (\-vvv for more, \-vvvv to enable connection debugging) .RE .SH "ENVIRONMENT" .sp The following environment variables may be specified\&. .sp ANSIBLE_CONFIG \(em Override the default ansible config file .sp Many more are available for most options in ansible\&.cfg .SH "FILES" .sp /etc/ansible/ansible\&.cfg \(em Config file, used if present .sp ~/\&.ansible\&.cfg \(em User config file, overrides the default config if present .SH "AUTHOR" .sp Ansible was originally written by Michael DeHaan\&. .SH "COPYRIGHT" .sp Copyright \(co 2017 Red Hat, Inc | Ansible\&. Ansible is released under the terms of the GPLv3 License\&. .SH "SEE ALSO" .sp \fBansible\fR(1), \fBansible\-config\fR(1), \fBansible\-doc\fR(1), \fBansible\-galaxy\fR(1), \fBansible\-inventory\fR(1), \fBansible\-playbook\fR(1), \fBansible\-pull\fR(1), \fBansible\-vault\fR(1) .sp Extensive documentation is available in the documentation site: http://docs\&.ansible\&.com\&. 
IRC and mailing list info can be found in file CONTRIBUTING\&.md, available in: https://github\&.com/ansible/ansible ansible-2.5.1/docs/man/man1/ansible-console.1.asciidoc.in0000644000000000000000000001100213265756165022764 0ustar rootroot00000000000000ansible-console(1) ================== :doctype: manpage :encoding: utf-8 :lang: en :man source: Ansible :man version: %VERSION% :man manual: System administration commands NAME ---- ansible-console - REPL console for executing Ansible tasks. SYNOPSIS -------- ansible-console [<host-pattern>] [options] DESCRIPTION ----------- a REPL that allows for running ad-hoc tasks against a chosen inventory (based on dominis' ansible-shell). COMMON OPTIONS -------------- *--ask-su-pass*:: ask for su password (deprecated, use become) *--ask-sudo-pass*:: ask for sudo password (deprecated, use become) *--ask-vault-pass*:: ask for vault password *--become-method* 'BECOME_METHOD':: privilege escalation method to use (default=sudo), valid choices: [ sudo | su | pbrun | pfexec | doas | dzdo | ksu | runas | pmrun | enable ] *--become-user* 'BECOME_USER':: run operations as this user (default=root) *--list-hosts*:: outputs a list of matching hosts; does not execute anything else *--playbook-dir* 'BASEDIR':: Since this tool does not use playbooks, use this as a substitute playbook directory. This sets the relative path for many features including roles/ group_vars/ etc. *--private-key*, *--key-file*:: use this file to authenticate the connection *--scp-extra-args* 'SCP_EXTRA_ARGS':: specify extra arguments to pass to scp only (e.g. -l) *--sftp-extra-args* 'SFTP_EXTRA_ARGS':: specify extra arguments to pass to sftp only (e.g. -f, -l) *--ssh-common-args* 'SSH_COMMON_ARGS':: specify common arguments to pass to sftp/scp/ssh (e.g. ProxyCommand) *--ssh-extra-args* 'SSH_EXTRA_ARGS':: specify extra arguments to pass to ssh only (e.g. -R) *--step*:: one-step-at-a-time: confirm each task before running *--syntax-check*:: perform a syntax check on the playbook, but do not execute it *--vault-id*:: the vault identity to use *--vault-password-file*:: vault password file *--version*:: show program's version number and exit *-C*, *--check*:: don't make any changes; instead, try to predict some of the changes that may occur *-D*, *--diff*:: when changing (small) files and templates, show the differences in those files; works great with --check *-K*, *--ask-become-pass*:: ask for privilege escalation password *-M*, *--module-path*:: prepend colon-separated path(s) to module library (default=[u'/home/jenkins/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules']) *-R* 'SU_USER', *--su-user* 'SU_USER':: run operations with su as this user (default=None) (deprecated, use become) *-S*, *--su*:: run operations with su (deprecated, use become) *-T* 'TIMEOUT', *--timeout* 'TIMEOUT':: override the connection timeout in seconds (default=10) *-U* 'SUDO_USER', *--sudo-user* 'SUDO_USER':: desired sudo user (default=root) (deprecated, use become) *-b*, *--become*:: run operations with become (does not imply password prompting) *-c* 'CONNECTION', *--connection* 'CONNECTION':: connection type to use (default=smart) *-f* 'FORKS', *--forks* 'FORKS':: specify number of parallel processes to use (default=5) *-h*, *--help*:: show this help message and exit *-i*, *--inventory*, *--inventory-file*:: specify inventory host path or comma separated host list.
--inventory-file is deprecated *-k*, *--ask-pass*:: ask for connection password *-l* 'SUBSET', *--limit* 'SUBSET':: further limit selected hosts to an additional pattern *-s*, *--sudo*:: run operations with sudo (nopasswd) (deprecated, use become) *-u* 'REMOTE_USER', *--user* 'REMOTE_USER':: connect as this user (default=None) *-v*, *--verbose*:: verbose mode (-vvv for more, -vvvv to enable connection debugging) ENVIRONMENT ----------- The following environment variables may be specified. ANSIBLE_CONFIG -- Override the default ansible config file Many more are available for most options in ansible.cfg FILES ----- /etc/ansible/ansible.cfg -- Config file, used if present ~/.ansible.cfg -- User config file, overrides the default config if present AUTHOR ------ Ansible was originally written by Michael DeHaan. COPYRIGHT --------- Copyright © 2017 Red Hat, Inc | Ansible. Ansible is released under the terms of the GPLv3 License. SEE ALSO -------- *ansible*(1), *ansible-config*(1), *ansible-doc*(1), *ansible-galaxy*(1), *ansible-inventory*(1), *ansible-playbook*(1), *ansible-pull*(1), *ansible-vault*(1) Extensive documentation is available in the documentation site: <https://docs.ansible.com>. IRC and mailing list info can be found in file CONTRIBUTING.md, available in: <https://github.com/ansible/ansible> ansible-2.5.1/docs/man/man1/ansible-doc.10000644000000000000000000000711713265756175017712 0ustar rootroot00000000000000'\" t .\" Title: ansible-doc .\" Author: [see the "AUTHOR" section] .\" Generator: DocBook XSL Stylesheets v1.78.1 .\" Date: 04/19/2018 .\" Manual: System administration commands .\" Source: Ansible 2.5.1 .\" Language: English .\" .TH "ANSIBLE\-DOC" "1" "04/19/2018" "Ansible 2\&.5\&.1" "System administration commands" .\" ----------------------------------------------------------------- .\" * Define some portability stuff .\" ----------------------------------------------------------------- .\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .\" http://bugs.debian.org/507673 .\" http://lists.gnu.org/archive/html/groff/2009-02/msg00013.html .\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .ie \n(.g .ds Aq \(aq .el .ds Aq ' .\" ----------------------------------------------------------------- .\" * set default formatting .\" ----------------------------------------------------------------- .\" disable hyphenation .nh .\" disable justification (adjust text to left margin only) .ad l .\" ----------------------------------------------------------------- .\" * MAIN CONTENT STARTS HERE * .\" ----------------------------------------------------------------- .SH "NAME" ansible-doc \- plugin documentation tool .SH "SYNOPSIS" .sp ansible\-doc [\-l|\-F|\-s] [options] [\-t <plugin type>] [plugin] .SH "DESCRIPTION" .sp displays information on modules installed in Ansible libraries\&. It displays a terse listing of plugins and their short descriptions, provides a printout of their DOCUMENTATION strings, and it can create a short "snippet" which can be pasted into a playbook\&. .SH "COMMON OPTIONS" .PP \fB\-\-version\fR .RS 4 show program\(cqs version number and exit .RE .PP \fB\-F\fR, \fB\-\-list_files\fR .RS 4 Show plugin names and their source files without summaries (implies \-\-list) .RE .PP \fB\-M\fR, \fB\-\-module\-path\fR .RS 4 prepend colon\-separated path(s) to module library (default=[u\*(Aq/home/jenkins/\&.ansible/plugins/modules\*(Aq, u\*(Aq/usr/share/ansible/plugins/modules\*(Aq]) .RE .PP \fB\-a\fR, \fB\-\-all\fR .RS 4 \fBFor internal testing only\fR Show documentation for all plugins\&.
.RE .PP \fB\-h\fR, \fB\-\-help\fR .RS 4 show this help message and exit .RE .PP \fB\-l\fR, \fB\-\-list\fR .RS 4 List available plugins .RE .PP \fB\-s\fR, \fB\-\-snippet\fR .RS 4 Show playbook snippet for specified plugin(s) .RE .PP \fB\-t\fR \fITYPE\fR, \fB\-\-type\fR \fITYPE\fR .RS 4 Choose which plugin type (defaults to "module") .RE .PP \fB\-v\fR, \fB\-\-verbose\fR .RS 4 verbose mode (\-vvv for more, \-vvvv to enable connection debugging) .RE .SH "ENVIRONMENT" .sp The following environment variables may be specified\&. .sp ANSIBLE_CONFIG \(em Override the default ansible config file .sp Many more are available for most options in ansible\&.cfg .SH "FILES" .sp /etc/ansible/ansible\&.cfg \(em Config file, used if present .sp ~/\&.ansible\&.cfg \(em User config file, overrides the default config if present .SH "AUTHOR" .sp Ansible was originally written by Michael DeHaan\&. .SH "COPYRIGHT" .sp Copyright \(co 2017 Red Hat, Inc | Ansible\&. Ansible is released under the terms of the GPLv3 License\&. .SH "SEE ALSO" .sp \fBansible\fR(1), \fBansible\-config\fR(1), \fBansible\-console\fR(1), \fBansible\-galaxy\fR(1), \fBansible\-inventory\fR(1), \fBansible\-playbook\fR(1), \fBansible\-pull\fR(1), \fBansible\-vault\fR(1) .sp Extensive documentation is available in the documentation site: http://docs\&.ansible\&.com\&. IRC and mailing list info can be found in file CONTRIBUTING\&.md, available in: https://github\&.com/ansible/ansible ansible-2.5.1/docs/man/man1/ansible-doc.1.asciidoc.in0000644000000000000000000000441013265756165022074 0ustar rootroot00000000000000ansible-doc(1) ============== :doctype: manpage :encoding: utf-8 :lang: en :man source: Ansible :man version: %VERSION% :man manual: System administration commands NAME ---- ansible-doc - plugin documentation tool SYNOPSIS -------- ansible-doc [-l|-F|-s] [options] [-t <plugin type>] [plugin] DESCRIPTION ----------- displays information on modules installed in Ansible libraries. It displays a terse listing of plugins and their short descriptions, provides a printout of their DOCUMENTATION strings, and it can create a short "snippet" which can be pasted into a playbook. COMMON OPTIONS -------------- *--version*:: show program's version number and exit *-F*, *--list_files*:: Show plugin names and their source files without summaries (implies --list) *-M*, *--module-path*:: prepend colon-separated path(s) to module library (default=[u'/home/jenkins/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules']) *-a*, *--all*:: **For internal testing only** Show documentation for all plugins. *-h*, *--help*:: show this help message and exit *-l*, *--list*:: List available plugins *-s*, *--snippet*:: Show playbook snippet for specified plugin(s) *-t* 'TYPE', *--type* 'TYPE':: Choose which plugin type (defaults to "module") *-v*, *--verbose*:: verbose mode (-vvv for more, -vvvv to enable connection debugging) ENVIRONMENT ----------- The following environment variables may be specified. ANSIBLE_CONFIG -- Override the default ansible config file Many more are available for most options in ansible.cfg FILES ----- /etc/ansible/ansible.cfg -- Config file, used if present ~/.ansible.cfg -- User config file, overrides the default config if present AUTHOR ------ Ansible was originally written by Michael DeHaan. COPYRIGHT --------- Copyright © 2017 Red Hat, Inc | Ansible. Ansible is released under the terms of the GPLv3 License.
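EXAMPLES
--------

The invocations below are illustrative sketches, not taken from the original
page; "copy" and "file" stand in for any installed plugin name.

    # terse list of all modules with short descriptions
    ansible-doc -l
    # full DOCUMENTATION for one module
    ansible-doc copy
    # playbook snippet ready to paste
    ansible-doc -s copy
    # document a plugin of another type, e.g. a lookup
    ansible-doc -t lookup file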
SEE ALSO -------- *ansible*(1), *ansible-config*(1), *ansible-console*(1), *ansible-galaxy*(1), *ansible-inventory*(1), *ansible-playbook*(1), *ansible-pull*(1), *ansible-vault*(1) Extensive documentation is available in the documentation site: <https://docs.ansible.com>. IRC and mailing list info can be found in file CONTRIBUTING.md, available in: <https://github.com/ansible/ansible> ansible-2.5.1/docs/man/man1/ansible-galaxy.10000644000000000000000000001642013265756177020441 0ustar rootroot00000000000000'\" t .\" Title: ansible-galaxy .\" Author: [see the "AUTHOR" section] .\" Generator: DocBook XSL Stylesheets v1.78.1 .\" Date: 04/19/2018 .\" Manual: System administration commands .\" Source: Ansible 2.5.1 .\" Language: English .\" .TH "ANSIBLE\-GALAXY" "1" "04/19/2018" "Ansible 2\&.5\&.1" "System administration commands" .\" ----------------------------------------------------------------- .\" * Define some portability stuff .\" ----------------------------------------------------------------- .\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .\" http://bugs.debian.org/507673 .\" http://lists.gnu.org/archive/html/groff/2009-02/msg00013.html .\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .ie \n(.g .ds Aq \(aq .el .ds Aq ' .\" ----------------------------------------------------------------- .\" * set default formatting .\" ----------------------------------------------------------------- .\" disable hyphenation .nh .\" disable justification (adjust text to left margin only) .ad l .\" ----------------------------------------------------------------- .\" * MAIN CONTENT STARTS HERE * .\" ----------------------------------------------------------------- .SH "NAME" ansible-galaxy \- manage Ansible roles in shared repositories .SH "SYNOPSIS" .sp ansible\-galaxy [delete|import|info|init|install|list|login|remove|search|setup] [\-\-help] [options] \&... .SH "DESCRIPTION" .sp command to manage Ansible roles in shared repositories, the default of which is Ansible Galaxy \fBhttps://galaxy\&.ansible\&.com\fR\&. .SH "COMMON OPTIONS" .PP \fB\-\-list\fR .RS 4 List all of your integrations\&. .RE .PP \fB\-\-remove\fR \fIREMOVE_ID\fR .RS 4 Remove the integration matching the provided ID value\&. Use \-\-list to see ID values\&. .RE .PP \fB\-\-version\fR .RS 4 show program\(cqs version number and exit .RE .PP \fB\-c\fR, \fB\-\-ignore\-certs\fR .RS 4 Ignore SSL certificate validation errors\&. .RE .PP \fB\-h\fR, \fB\-\-help\fR .RS 4 show this help message and exit .RE .PP \fB\-s\fR \fIAPI_SERVER\fR, \fB\-\-server\fR \fIAPI_SERVER\fR .RS 4 The API server destination .RE .PP \fB\-v\fR, \fB\-\-verbose\fR .RS 4 verbose mode (\-vvv for more, \-vvvv to enable connection debugging) .RE .SH "ACTIONS" .PP \fBinfo\fR .RS 4 prints out detailed information about an installed role as well as info available from the galaxy API\&. .PP \fB\-\-offline\fR .RS 4 Don\(cqt query the galaxy API when creating roles .RE .PP \fB\-p\fR, \fB\-\-roles\-path\fR .RS 4 The path to the directory containing your roles\&. The default is the roles_path configured in your ansible\&.cfg file (/etc/ansible/roles if not configured) .RE .RE .PP \fBsearch\fR .RS 4 searches for roles on the Ansible Galaxy server .PP \fB\-\-author\fR \fIAUTHOR\fR .RS 4 GitHub username .RE .PP \fB\-\-galaxy\-tags\fR \fIGALAXY_TAGS\fR .RS 4 list of galaxy tags to filter by .RE .PP \fB\-\-platforms\fR \fIPLATFORMS\fR .RS 4 list of OS platforms to filter by .RE .PP \fB\-p\fR, \fB\-\-roles\-path\fR .RS 4 The path to the directory containing your roles\&.
The default is the roles_path configured in your ansible\&.cfg file (/etc/ansible/roles if not configured) .RE .RE .PP \fBsetup\fR .RS 4 Set up an integration from Github or Travis for Ansible Galaxy roles .PP \fB\-\-list\fR .RS 4 List all of your integrations\&. .RE .PP \fB\-\-remove\fR \fIREMOVE_ID\fR .RS 4 Remove the integration matching the provided ID value\&. Use \-\-list to see ID values\&. .RE .RE .PP \fBlist\fR .RS 4 lists the roles installed on the local system or matches a single role passed as an argument\&. .PP \fB\-p\fR, \fB\-\-roles\-path\fR .RS 4 The path to the directory containing your roles\&. The default is the roles_path configured in your ansible\&.cfg file (/etc/ansible/roles if not configured) .RE .RE .PP \fBremove\fR .RS 4 removes the list of roles passed as arguments from the local system\&. .PP \fB\-p\fR, \fB\-\-roles\-path\fR .RS 4 The path to the directory containing your roles\&. The default is the roles_path configured in your ansible\&.cfg file (/etc/ansible/roles if not configured) .RE .RE .PP \fBinit\fR .RS 4 creates the skeleton framework of a role that complies with the galaxy metadata format\&. .PP \fB\-\-container\-enabled\fR .RS 4 Initialize the skeleton role with default contents for a Container Enabled role\&. .RE .PP \fB\-\-init\-path\fR \fIINIT_PATH\fR .RS 4 The path in which the skeleton role will be created\&. The default is the current working directory\&. .RE .PP \fB\-\-offline\fR .RS 4 Don\(cqt query the galaxy API when creating roles .RE .PP \fB\-\-role\-skeleton\fR \fIROLE_SKELETON\fR .RS 4 The path to a role skeleton that the new role should be based upon\&. .RE .PP \fB\-f\fR, \fB\-\-force\fR .RS 4 Force overwriting an existing role .RE .RE .PP \fBinstall\fR .RS 4 uses the args list of roles to be installed, unless \-r was specified\&. The list of roles can be a name (which will be downloaded via the galaxy API and github), or it can be a local \&.tar\&.gz file\&. .PP \fB\-f\fR, \fB\-\-force\fR .RS 4 Force overwriting an existing role .RE .PP \fB\-i\fR, \fB\-\-ignore\-errors\fR .RS 4 Ignore errors and continue with the next specified role\&. .RE .PP \fB\-n\fR, \fB\-\-no\-deps\fR .RS 4 Don\(cqt download roles listed as dependencies .RE .PP \fB\-p\fR, \fB\-\-roles\-path\fR .RS 4 The path to the directory containing your roles\&. The default is the roles_path configured in your ansible\&.cfg file (/etc/ansible/roles if not configured) .RE .PP \fB\-r\fR \fIROLE_FILE\fR, \fB\-\-role\-file\fR \fIROLE_FILE\fR .RS 4 A file containing a list of roles to be imported .RE .RE .PP \fBimport\fR .RS 4 used to import a role into Ansible Galaxy .PP \fB\-\-branch\fR \fIREFERENCE\fR .RS 4 The name of a branch to import\&. Defaults to the repository\(cqs default branch (usually master) .RE .PP \fB\-\-no\-wait\fR .RS 4 Don\(cqt wait for import results\&. .RE .PP \fB\-\-role\-name\fR \fIROLE_NAME\fR .RS 4 The name the role should have, if different than the repo name .RE .PP \fB\-\-status\fR .RS 4 Check the status of the most recent import request for given github_user/github_repo\&. .RE .RE .PP \fBlogin\fR .RS 4 verify user\(cqs identity via Github and retrieve an auth token from Ansible Galaxy\&. .PP \fB\-\-github\-token\fR \fITOKEN\fR .RS 4 Identify with github token rather than username and password\&. .RE .RE .PP \fBdelete\fR .RS 4 Delete a role from Ansible Galaxy\&. .RE .SH "ENVIRONMENT" .sp The following environment variables may be specified\&.
.sp ANSIBLE_CONFIG \(em Override the default ansible config file .sp Many more are available for most options in ansible\&.cfg .SH "FILES" .sp /etc/ansible/ansible\&.cfg \(em Config file, used if present .sp ~/\&.ansible\&.cfg \(em User config file, overrides the default config if present .SH "AUTHOR" .sp Ansible was originally written by Michael DeHaan\&. .SH "COPYRIGHT" .sp Copyright \(co 2017 Red Hat, Inc | Ansible\&. Ansible is released under the terms of the GPLv3 License\&. .SH "SEE ALSO" .sp \fBansible\fR(1), \fBansible\-config\fR(1), \fBansible\-console\fR(1), \fBansible\-doc\fR(1), \fBansible\-inventory\fR(1), \fBansible\-playbook\fR(1), \fBansible\-pull\fR(1), \fBansible\-vault\fR(1) .sp Extensive documentation is available in the documentation site: http://docs\&.ansible\&.com\&. IRC and mailing list info can be found in file CONTRIBUTING\&.md, available in: https://github\&.com/ansible/ansible ansible-2.5.1/docs/man/man1/ansible-galaxy.1.asciidoc.in0000644000000000000000000001307613265756165022620 0ustar rootroot00000000000000ansible-galaxy(1) ================= :doctype: manpage :encoding: utf-8 :lang: en :man source: Ansible :man version: %VERSION% :man manual: System administration commands NAME ---- ansible-galaxy - manage Ansible roles in shared repositories SYNOPSIS -------- ansible-galaxy [delete|import|info|init|install|list|login|remove|search|setup] [--help] [options] ... DESCRIPTION ----------- command to manage Ansible roles in shared repositories, the default of which is Ansible Galaxy *https://galaxy.ansible.com*. COMMON OPTIONS -------------- *--list*:: List all of your integrations. *--remove* 'REMOVE_ID':: Remove the integration matching the provided ID value. Use --list to see ID values. *--version*:: show program's version number and exit *-c*, *--ignore-certs*:: Ignore SSL certificate validation errors. *-h*, *--help*:: show this help message and exit *-s* 'API_SERVER', *--server* 'API_SERVER':: The API server destination *-v*, *--verbose*:: verbose mode (-vvv for more, -vvvv to enable connection debugging) ACTIONS ------- *info*::: prints out detailed information about an installed role as well as info available from the galaxy API. *--offline*:: Don't query the galaxy API when creating roles *-p*, *--roles-path*:: The path to the directory containing your roles. The default is the roles_path configured in your ansible.cfg file (/etc/ansible/roles if not configured) *search*::: searches for roles on the Ansible Galaxy server *--author* 'AUTHOR':: GitHub username *--galaxy-tags* 'GALAXY_TAGS':: list of galaxy tags to filter by *--platforms* 'PLATFORMS':: list of OS platforms to filter by *-p*, *--roles-path*:: The path to the directory containing your roles. The default is the roles_path configured in your ansible.cfg file (/etc/ansible/roles if not configured) *setup*::: Set up an integration from Github or Travis for Ansible Galaxy roles *--list*:: List all of your integrations. *--remove* 'REMOVE_ID':: Remove the integration matching the provided ID value. Use --list to see ID values. *list*::: lists the roles installed on the local system or matches a single role passed as an argument. *-p*, *--roles-path*:: The path to the directory containing your roles. The default is the roles_path configured in your ansible.cfg file (/etc/ansible/roles if not configured) *remove*::: removes the list of roles passed as arguments from the local system. *-p*, *--roles-path*:: The path to the directory containing your roles.
The default is the roles_path configured in your ansible.cfg file (/etc/ansible/roles if not configured) *init*::: creates the skeleton framework of a role that complies with the galaxy metadata format. *--container-enabled*:: Initialize the skeleton role with default contents for a Container Enabled role. *--init-path* 'INIT_PATH':: The path in which the skeleton role will be created. The default is the current working directory. *--offline*:: Don't query the galaxy API when creating roles *--role-skeleton* 'ROLE_SKELETON':: The path to a role skeleton that the new role should be based upon. *-f*, *--force*:: Force overwriting an existing role *install*::: uses the args list of roles to be installed, unless -r was specified. The list of roles can be a name (which will be downloaded via the galaxy API and github), or it can be a local .tar.gz file. *-f*, *--force*:: Force overwriting an existing role *-i*, *--ignore-errors*:: Ignore errors and continue with the next specified role. *-n*, *--no-deps*:: Don't download roles listed as dependencies *-p*, *--roles-path*:: The path to the directory containing your roles. The default is the roles_path configured in your ansible.cfg file (/etc/ansible/roles if not configured) *-r* 'ROLE_FILE', *--role-file* 'ROLE_FILE':: A file containing a list of roles to be imported *import*::: used to import a role into Ansible Galaxy *--branch* 'REFERENCE':: The name of a branch to import. Defaults to the repository's default branch (usually master) *--no-wait*:: Don't wait for import results. *--role-name* 'ROLE_NAME':: The name the role should have, if different than the repo name *--status*:: Check the status of the most recent import request for given github_user/github_repo. *login*::: verify user's identity via Github and retrieve an auth token from Ansible Galaxy. *--github-token* 'TOKEN':: Identify with github token rather than username and password. *delete*::: Delete a role from Ansible Galaxy. ENVIRONMENT ----------- The following environment variables may be specified. ANSIBLE_CONFIG -- Override the default ansible config file Many more are available for most options in ansible.cfg FILES ----- /etc/ansible/ansible.cfg -- Config file, used if present ~/.ansible.cfg -- User config file, overrides the default config if present AUTHOR ------ Ansible was originally written by Michael DeHaan. COPYRIGHT --------- Copyright © 2017 Red Hat, Inc | Ansible. Ansible is released under the terms of the GPLv3 License. SEE ALSO -------- *ansible*(1), *ansible-config*(1), *ansible-console*(1), *ansible-doc*(1), *ansible-inventory*(1), *ansible-playbook*(1), *ansible-pull*(1), *ansible-vault*(1) Extensive documentation is available in the documentation site: <https://docs.ansible.com>.
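For illustration only (not part of the original page; the role and file names
are placeholders), the most common actions look like:

    # install a role from Galaxy, or from a requirements file
    ansible-galaxy install username.rolename
    ansible-galaxy install -r requirements.yml
    # scaffold a new role that complies with the Galaxy metadata format
    ansible-galaxy init my_role
    # show what is installed locally
    ansible-galaxy list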
IRC and mailing list info can be found in file CONTRIBUTING.md, available in: <https://github.com/ansible/ansible> ansible-2.5.1/docs/man/man1/ansible-inventory.10000644000000000000000000000755013265756173021205 0ustar rootroot00000000000000'\" t .\" Title: ansible-inventory .\" Author: [see the "AUTHOR" section] .\" Generator: DocBook XSL Stylesheets v1.78.1 .\" Date: 04/19/2018 .\" Manual: System administration commands .\" Source: Ansible 2.5.1 .\" Language: English .\" .TH "ANSIBLE\-INVENTORY" "1" "04/19/2018" "Ansible 2\&.5\&.1" "System administration commands" .\" ----------------------------------------------------------------- .\" * Define some portability stuff .\" ----------------------------------------------------------------- .\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .\" http://bugs.debian.org/507673 .\" http://lists.gnu.org/archive/html/groff/2009-02/msg00013.html .\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .ie \n(.g .ds Aq \(aq .el .ds Aq ' .\" ----------------------------------------------------------------- .\" * set default formatting .\" ----------------------------------------------------------------- .\" disable hyphenation .nh .\" disable justification (adjust text to left margin only) .ad l .\" ----------------------------------------------------------------- .\" * MAIN CONTENT STARTS HERE * .\" ----------------------------------------------------------------- .SH "NAME" ansible-inventory \- display or dump the configured Ansible inventory .SH "SYNOPSIS" .sp ansible\-inventory [options] [host|group] .SH "DESCRIPTION" .sp used to display or dump the configured inventory as Ansible sees it .SH "COMMON OPTIONS" .PP \fB\-\-ask\-vault\-pass\fR .RS 4 ask for vault password .RE .PP \fB\-\-export\fR .RS 4 When doing \-\-list, represent it in a way that is optimized for export, not as an accurate representation of how Ansible has processed it .RE .PP \fB\-\-graph\fR .RS 4 create inventory graph, if supplying pattern it must be a valid group name .RE .PP \fB\-\-host\fR \fIHOST\fR .RS 4 Output specific host info, works as inventory script .RE .PP \fB\-\-list\fR .RS 4 Output all hosts info, works as inventory script .RE .PP \fB\-\-playbook\-dir\fR \fIBASEDIR\fR .RS 4 Since this tool does not use playbooks, use this as a substitute playbook directory\&. This sets the relative path for many features including roles/ group_vars/ etc\&. .RE .PP \fB\-\-vars\fR .RS 4 Add vars to graph display, ignored unless used with \-\-graph .RE .PP \fB\-\-vault\-id\fR .RS 4 the vault identity to use .RE .PP \fB\-\-vault\-password\-file\fR .RS 4 vault password file .RE .PP \fB\-\-version\fR .RS 4 show program\(cqs version number and exit .RE .PP \fB\-h\fR, \fB\-\-help\fR .RS 4 show this help message and exit .RE .PP \fB\-i\fR, \fB\-\-inventory\fR, \fB\-\-inventory\-file\fR .RS 4 specify inventory host path or comma separated host list\&. \-\-inventory\-file is deprecated .RE .PP \fB\-v\fR, \fB\-\-verbose\fR .RS 4 verbose mode (\-vvv for more, \-vvvv to enable connection debugging) .RE .PP \fB\-y\fR, \fB\-\-yaml\fR .RS 4 Use YAML format instead of default JSON, ignored for \-\-graph .RE .SH "ENVIRONMENT" .sp The following environment variables may be specified\&. .sp ANSIBLE_CONFIG \(em Override the default ansible config file .sp Many more are available for most options in ansible\&.cfg .SH "FILES" .sp /etc/ansible/ansible\&.cfg \(em Config file, used if present .sp ~/\&.ansible\&.cfg \(em User config file, overrides the default config if present .SH "AUTHOR" .sp Ansible was originally written by Michael DeHaan\&.
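.SH "EXAMPLES"
.sp
Illustrative invocations, not part of the original page; hosts\&.ini and web1 are placeholder names\&.
.sp
.nf
# dump the whole inventory as JSON, as an inventory script would
ansible-inventory -i hosts.ini --list
# draw the group graph, including variables
ansible-inventory -i hosts.ini --graph --vars
# show a single host's variables in YAML
ansible-inventory -i hosts.ini --host web1 -y
.fi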
.SH "COPYRIGHT" .sp Copyright \(co 2017 Red Hat, Inc | Ansible\&. Ansible is released under the terms of the GPLv3 License\&. .SH "SEE ALSO" .sp \fBansible\fR(1), \fBansible\-config\fR(1), \fBansible\-console\fR(1), \fBansible\-doc\fR(1), \fBansible\-galaxy\fR(1), \fBansible\-playbook\fR(1), \fBansible\-pull\fR(1), \fBansible\-vault\fR(1) .sp Extensive documentation is available in the documentation site: http://docs\&.ansible\&.com\&. IRC and mailing list info can be found in file CONTRIBUTING\&.md, available in: https://github\&.com/ansible/ansible ansible-2.5.1/docs/man/man1/ansible-inventory.1.asciidoc.in0000644000000000000000000000475313265756165023366 0ustar rootroot00000000000000ansible-inventory(1) ==================== :doctype: manpage :encoding: utf-8 :lang: en :man source: Ansible :man version: %VERSION% :man manual: System administration commands NAME ---- ansible-inventory - display or dump the configured Ansible inventory SYNOPSIS -------- ansible-inventory [options] [host|group] DESCRIPTION ----------- used to display or dump the configured inventory as Ansible sees it COMMON OPTIONS -------------- *--ask-vault-pass*:: ask for vault password *--export*:: When doing --list, represent it in a way that is optimized for export, not as an accurate representation of how Ansible has processed it *--graph*:: create inventory graph, if supplying pattern it must be a valid group name *--host* 'HOST':: Output specific host info, works as inventory script *--list*:: Output all hosts info, works as inventory script *--playbook-dir* 'BASEDIR':: Since this tool does not use playbooks, use this as a substitute playbook directory. This sets the relative path for many features including roles/ group_vars/ etc. *--vars*:: Add vars to graph display, ignored unless used with --graph *--vault-id*:: the vault identity to use *--vault-password-file*:: vault password file *--version*:: show program's version number and exit *-h*, *--help*:: show this help message and exit *-i*, *--inventory*, *--inventory-file*:: specify inventory host path or comma separated host list. --inventory-file is deprecated *-v*, *--verbose*:: verbose mode (-vvv for more, -vvvv to enable connection debugging) *-y*, *--yaml*:: Use YAML format instead of default JSON, ignored for --graph ENVIRONMENT ----------- The following environment variables may be specified. ANSIBLE_CONFIG -- Override the default ansible config file Many more are available for most options in ansible.cfg FILES ----- /etc/ansible/ansible.cfg -- Config file, used if present ~/.ansible.cfg -- User config file, overrides the default config if present AUTHOR ------ Ansible was originally written by Michael DeHaan. COPYRIGHT --------- Copyright © 2017 Red Hat, Inc | Ansible. Ansible is released under the terms of the GPLv3 License. SEE ALSO -------- *ansible*(1), *ansible-config*(1), *ansible-console*(1), *ansible-doc*(1), *ansible-galaxy*(1), *ansible-playbook*(1), *ansible-pull*(1), *ansible-vault*(1) Extensive documentation is available in the documentation site: <https://docs.ansible.com>.
IRC and mailing list info can be found in file CONTRIBUTING.md, available in: <https://github.com/ansible/ansible> ansible-2.5.1/docs/man/man1/ansible-playbook.10000644000000000000000000001603513265756171020770 0ustar rootroot00000000000000'\" t .\" Title: ansible-playbook .\" Author: [see the "AUTHOR" section] .\" Generator: DocBook XSL Stylesheets v1.78.1 .\" Date: 04/19/2018 .\" Manual: System administration commands .\" Source: Ansible 2.5.1 .\" Language: English .\" .TH "ANSIBLE\-PLAYBOOK" "1" "04/19/2018" "Ansible 2\&.5\&.1" "System administration commands" .\" ----------------------------------------------------------------- .\" * Define some portability stuff .\" ----------------------------------------------------------------- .\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .\" http://bugs.debian.org/507673 .\" http://lists.gnu.org/archive/html/groff/2009-02/msg00013.html .\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .ie \n(.g .ds Aq \(aq .el .ds Aq ' .\" ----------------------------------------------------------------- .\" * set default formatting .\" ----------------------------------------------------------------- .\" disable hyphenation .nh .\" disable justification (adjust text to left margin only) .ad l .\" ----------------------------------------------------------------- .\" * MAIN CONTENT STARTS HERE * .\" ----------------------------------------------------------------- .SH "NAME" ansible-playbook \- Runs Ansible playbooks, executing the defined tasks on the targeted hosts\&. .SH "SYNOPSIS" .sp ansible\-playbook [options] playbook\&.yml [playbook2 \&...] .SH "DESCRIPTION" .sp the tool to run \fBAnsible playbooks\fR, which are a configuration and multinode deployment system\&. See the project home page (https://docs\&.ansible\&.com) for more information\&. .SH "COMMON OPTIONS" .PP \fB\-\-ask\-su\-pass\fR .RS 4 ask for su password (deprecated, use become) .RE .PP \fB\-\-ask\-sudo\-pass\fR .RS 4 ask for sudo password (deprecated, use become) .RE .PP \fB\-\-ask\-vault\-pass\fR .RS 4 ask for vault password .RE .PP \fB\-\-become\-method\fR \fIBECOME_METHOD\fR .RS 4 privilege escalation method to use (default=sudo), valid choices: [ sudo | su | pbrun | pfexec | doas | dzdo | ksu | runas | pmrun | enable ] .RE .PP \fB\-\-become\-user\fR \fIBECOME_USER\fR .RS 4 run operations as this user (default=root) .RE .PP \fB\-\-flush\-cache\fR .RS 4 clear the fact cache for every host in inventory .RE .PP \fB\-\-force\-handlers\fR .RS 4 run handlers even if a task fails .RE .PP \fB\-\-list\-hosts\fR .RS 4 outputs a list of matching hosts; does not execute anything else .RE .PP \fB\-\-list\-tags\fR .RS 4 list all available tags .RE .PP \fB\-\-list\-tasks\fR .RS 4 list all tasks that would be executed .RE .PP \fB\-\-private\-key\fR, \fB\-\-key\-file\fR .RS 4 use this file to authenticate the connection .RE .PP \fB\-\-scp\-extra\-args\fR \fISCP_EXTRA_ARGS\fR .RS 4 specify extra arguments to pass to scp only (e\&.g\&. \-l) .RE .PP \fB\-\-sftp\-extra\-args\fR \fISFTP_EXTRA_ARGS\fR .RS 4 specify extra arguments to pass to sftp only (e\&.g\&. \-f, \-l) .RE .PP \fB\-\-skip\-tags\fR .RS 4 only run plays and tasks whose tags do not match these values .RE .PP \fB\-\-ssh\-common\-args\fR \fISSH_COMMON_ARGS\fR .RS 4 specify common arguments to pass to sftp/scp/ssh (e\&.g\&. ProxyCommand) .RE .PP \fB\-\-ssh\-extra\-args\fR \fISSH_EXTRA_ARGS\fR .RS 4 specify extra arguments to pass to ssh only (e\&.g\&.
\-R) .RE .PP \fB\-\-start\-at\-task\fR \fISTART_AT_TASK\fR .RS 4 start the playbook at the task matching this name .RE .PP \fB\-\-step\fR .RS 4 one\-step\-at\-a\-time: confirm each task before running .RE .PP \fB\-\-syntax\-check\fR .RS 4 perform a syntax check on the playbook, but do not execute it .RE .PP \fB\-\-vault\-id\fR .RS 4 the vault identity to use .RE .PP \fB\-\-vault\-password\-file\fR .RS 4 vault password file .RE .PP \fB\-\-version\fR .RS 4 show program\(cqs version number and exit .RE .PP \fB\-C\fR, \fB\-\-check\fR .RS 4 don\(cqt make any changes; instead, try to predict some of the changes that may occur .RE .PP \fB\-D\fR, \fB\-\-diff\fR .RS 4 when changing (small) files and templates, show the differences in those files; works great with \-\-check .RE .PP \fB\-K\fR, \fB\-\-ask\-become\-pass\fR .RS 4 ask for privilege escalation password .RE .PP \fB\-M\fR, \fB\-\-module\-path\fR .RS 4 prepend colon\-separated path(s) to module library (default=[u\*(Aq/home/jenkins/\&.ansible/plugins/modules\*(Aq, u\*(Aq/usr/share/ansible/plugins/modules\*(Aq]) .RE .PP \fB\-R\fR \fISU_USER\fR, \fB\-\-su\-user\fR \fISU_USER\fR .RS 4 run operations with su as this user (default=None) (deprecated, use become) .RE .PP \fB\-S\fR, \fB\-\-su\fR .RS 4 run operations with su (deprecated, use become) .RE .PP \fB\-T\fR \fITIMEOUT\fR, \fB\-\-timeout\fR \fITIMEOUT\fR .RS 4 override the connection timeout in seconds (default=10) .RE .PP \fB\-U\fR \fISUDO_USER\fR, \fB\-\-sudo\-user\fR \fISUDO_USER\fR .RS 4 desired sudo user (default=root) (deprecated, use become) .RE .PP \fB\-b\fR, \fB\-\-become\fR .RS 4 run operations with become (does not imply password prompting) .RE .PP \fB\-c\fR \fICONNECTION\fR, \fB\-\-connection\fR \fICONNECTION\fR .RS 4 connection type to use (default=smart) .RE .PP \fB\-e\fR, \fB\-\-extra\-vars\fR .RS 4 set additional variables as key=value or YAML/JSON, if filename prepend with @ .RE .PP \fB\-f\fR \fIFORKS\fR, \fB\-\-forks\fR \fIFORKS\fR .RS 4 specify number of parallel processes to use (default=5) .RE .PP \fB\-h\fR, \fB\-\-help\fR .RS 4 show this help message and exit .RE .PP \fB\-i\fR, \fB\-\-inventory\fR, \fB\-\-inventory\-file\fR .RS 4 specify inventory host path or comma separated host list\&. \-\-inventory\-file is deprecated .RE .PP \fB\-k\fR, \fB\-\-ask\-pass\fR .RS 4 ask for connection password .RE .PP \fB\-l\fR \fISUBSET\fR, \fB\-\-limit\fR \fISUBSET\fR .RS 4 further limit selected hosts to an additional pattern .RE .PP \fB\-s\fR, \fB\-\-sudo\fR .RS 4 run operations with sudo (nopasswd) (deprecated, use become) .RE .PP \fB\-t\fR, \fB\-\-tags\fR .RS 4 only run plays and tasks tagged with these values .RE .PP \fB\-u\fR \fIREMOTE_USER\fR, \fB\-\-user\fR \fIREMOTE_USER\fR .RS 4 connect as this user (default=None) .RE .PP \fB\-v\fR, \fB\-\-verbose\fR .RS 4 verbose mode (\-vvv for more, \-vvvv to enable connection debugging) .RE .SH "ENVIRONMENT" .sp The following environment variables may be specified\&. .sp ANSIBLE_CONFIG \(em Override the default ansible config file .sp Many more are available for most options in ansible\&.cfg .SH "FILES" .sp /etc/ansible/ansible\&.cfg \(em Config file, used if present .sp ~/\&.ansible\&.cfg \(em User config file, overrides the default config if present .SH "AUTHOR" .sp Ansible was originally written by Michael DeHaan\&. .SH "COPYRIGHT" .sp Copyright \(co 2017 Red Hat, Inc | Ansible\&. Ansible is released under the terms of the GPLv3 License\&. 
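.SH "EXAMPLES"
.sp
Illustrative invocations, not part of the original page; hosts\&.ini, site\&.yml and the pattern web are placeholders\&.
.sp
.nf
# run a playbook against an inventory
ansible-playbook -i hosts.ini site.yml
# dry-run with diffs, limited to one group
ansible-playbook -i hosts.ini site.yml --check --diff -l web
# run only tasks tagged "deploy", passing an extra variable
ansible-playbook -i hosts.ini site.yml -t deploy -e "app_version=1.2.3"
.fi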
.SH "SEE ALSO" .sp \fBansible\fR(1), \fBansible\-config\fR(1), \fBansible\-console\fR(1), \fBansible\-doc\fR(1), \fBansible\-galaxy\fR(1), \fBansible\-inventory\fR(1), \fBansible\-pull\fR(1), \fBansible\-vault\fR(1) .sp Extensive documentation is available in the documentation site: http://docs\&.ansible\&.com\&. IRC and mailing list info can be found in file CONTRIBUTING\&.md, available in: https://github\&.com/ansible/ansible ansible-2.5.1/docs/man/man1/ansible-playbook.1.asciidoc.in0000644000000000000000000001174713265756165023162 0ustar rootroot00000000000000ansible-playbook(1) =================== :doctype: manpage :encoding: utf-8 :lang: en :man source: Ansible :man version: %VERSION% :man manual: System administration commands NAME ---- ansible-playbook - Runs Ansible playbooks, executing the defined tasks on the targeted hosts. SYNOPSIS -------- ansible-playbook [options] playbook.yml [playbook2 ...] DESCRIPTION ----------- the tool to run *Ansible playbooks*, which are a configuration and multinode deployment system. See the project home page (https://docs.ansible.com) for more information. COMMON OPTIONS -------------- *--ask-su-pass*:: ask for su password (deprecated, use become) *--ask-sudo-pass*:: ask for sudo password (deprecated, use become) *--ask-vault-pass*:: ask for vault password *--become-method* 'BECOME_METHOD':: privilege escalation method to use (default=sudo), valid choices: [ sudo | su | pbrun | pfexec | doas | dzdo | ksu | runas | pmrun | enable ] *--become-user* 'BECOME_USER':: run operations as this user (default=root) *--flush-cache*:: clear the fact cache for every host in inventory *--force-handlers*:: run handlers even if a task fails *--list-hosts*:: outputs a list of matching hosts; does not execute anything else *--list-tags*:: list all available tags *--list-tasks*:: list all tasks that would be executed *--private-key*, *--key-file*:: use this file to authenticate the connection *--scp-extra-args* 'SCP_EXTRA_ARGS':: specify extra arguments to pass to scp only (e.g. -l) *--sftp-extra-args* 'SFTP_EXTRA_ARGS':: specify extra arguments to pass to sftp only (e.g. -f, -l) *--skip-tags*:: only run plays and tasks whose tags do not match these values *--ssh-common-args* 'SSH_COMMON_ARGS':: specify common arguments to pass to sftp/scp/ssh (e.g. ProxyCommand) *--ssh-extra-args* 'SSH_EXTRA_ARGS':: specify extra arguments to pass to ssh only (e.g. 
-R) *--start-at-task* 'START_AT_TASK':: start the playbook at the task matching this name *--step*:: one-step-at-a-time: confirm each task before running *--syntax-check*:: perform a syntax check on the playbook, but do not execute it *--vault-id*:: the vault identity to use *--vault-password-file*:: vault password file *--version*:: show program's version number and exit *-C*, *--check*:: don't make any changes; instead, try to predict some of the changes that may occur *-D*, *--diff*:: when changing (small) files and templates, show the differences in those files; works great with --check *-K*, *--ask-become-pass*:: ask for privilege escalation password *-M*, *--module-path*:: prepend colon-separated path(s) to module library (default=[u'/home/jenkins/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules']) *-R* 'SU_USER', *--su-user* 'SU_USER':: run operations with su as this user (default=None) (deprecated, use become) *-S*, *--su*:: run operations with su (deprecated, use become) *-T* 'TIMEOUT', *--timeout* 'TIMEOUT':: override the connection timeout in seconds (default=10) *-U* 'SUDO_USER', *--sudo-user* 'SUDO_USER':: desired sudo user (default=root) (deprecated, use become) *-b*, *--become*:: run operations with become (does not imply password prompting) *-c* 'CONNECTION', *--connection* 'CONNECTION':: connection type to use (default=smart) *-e*, *--extra-vars*:: set additional variables as key=value or YAML/JSON, if filename prepend with @ *-f* 'FORKS', *--forks* 'FORKS':: specify number of parallel processes to use (default=5) *-h*, *--help*:: show this help message and exit *-i*, *--inventory*, *--inventory-file*:: specify inventory host path or comma separated host list. --inventory-file is deprecated *-k*, *--ask-pass*:: ask for connection password *-l* 'SUBSET', *--limit* 'SUBSET':: further limit selected hosts to an additional pattern *-s*, *--sudo*:: run operations with sudo (nopasswd) (deprecated, use become) *-t*, *--tags*:: only run plays and tasks tagged with these values *-u* 'REMOTE_USER', *--user* 'REMOTE_USER':: connect as this user (default=None) *-v*, *--verbose*:: verbose mode (-vvv for more, -vvvv to enable connection debugging) ENVIRONMENT ----------- The following environment variables may be specified. ANSIBLE_CONFIG -- Override the default ansible config file Many more are available for most options in ansible.cfg FILES ----- /etc/ansible/ansible.cfg -- Config file, used if present ~/.ansible.cfg -- User config file, overrides the default config if present AUTHOR ------ Ansible was originally written by Michael DeHaan. COPYRIGHT --------- Copyright © 2017 Red Hat, Inc | Ansible. Ansible is released under the terms of the GPLv3 License. SEE ALSO -------- *ansible*(1), *ansible-config*(1), *ansible-console*(1), *ansible-doc*(1), *ansible-galaxy*(1), *ansible-inventory*(1), *ansible-pull*(1), *ansible-vault*(1) Extensive documentation is available in the documentation site: <https://docs.ansible.com>.
IRC and mailing list info can be found in file CONTRIBUTING.md, available in: <https://github.com/ansible/ansible> ansible-2.5.1/docs/man/man1/ansible-pull.10000644000000000000000000001657613265756176020137 0ustar rootroot00000000000000'\" t .\" Title: ansible-pull .\" Author: [see the "AUTHOR" section] .\" Generator: DocBook XSL Stylesheets v1.78.1 .\" Date: 04/19/2018 .\" Manual: System administration commands .\" Source: Ansible 2.5.1 .\" Language: English .\" .TH "ANSIBLE\-PULL" "1" "04/19/2018" "Ansible 2\&.5\&.1" "System administration commands" .\" ----------------------------------------------------------------- .\" * Define some portability stuff .\" ----------------------------------------------------------------- .\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .\" http://bugs.debian.org/507673 .\" http://lists.gnu.org/archive/html/groff/2009-02/msg00013.html .\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .ie \n(.g .ds Aq \(aq .el .ds Aq ' .\" ----------------------------------------------------------------- .\" * set default formatting .\" ----------------------------------------------------------------- .\" disable hyphenation .nh .\" disable justification (adjust text to left margin only) .ad l .\" ----------------------------------------------------------------- .\" * MAIN CONTENT STARTS HERE * .\" ----------------------------------------------------------------- .SH "NAME" ansible-pull \- pulls playbooks from a VCS repo and executes them for the local host .SH "SYNOPSIS" .sp ansible\-pull \-U <repository> [options] [<playbook\&.yml>] .SH "DESCRIPTION" .sp is used to set up a remote copy of ansible on each managed node, each set to run via cron and update playbook source via a source repository\&. This inverts the default \fBpush\fR architecture of ansible into a \fBpull\fR architecture, which has near\-limitless scaling potential\&. .sp The setup playbook can be tuned to change the cron frequency, logging locations, and parameters to ansible\-pull\&. This is useful both for extreme scale\-out as well as periodic remediation\&. Usage of the \fIfetch\fR module to retrieve logs from ansible\-pull runs would be an excellent way to gather and analyze remote logs from ansible\-pull\&. .SH "COMMON OPTIONS" .PP \fB\-\-accept\-host\-key\fR .RS 4 adds the hostkey for the repo url if not already added .RE .PP \fB\-\-ask\-su\-pass\fR .RS 4 ask for su password (deprecated, use become) .RE .PP \fB\-\-ask\-sudo\-pass\fR .RS 4 ask for sudo password (deprecated, use become) .RE .PP \fB\-\-ask\-vault\-pass\fR .RS 4 ask for vault password .RE .PP \fB\-\-check\fR .RS 4 don\(cqt make any changes; instead, try to predict some of the changes that may occur .RE .PP \fB\-\-clean\fR .RS 4 modified files in the working repository will be discarded .RE .PP \fB\-\-full\fR .RS 4 Do a full clone, instead of a shallow one\&. .RE .PP \fB\-\-list\-hosts\fR .RS 4 outputs a list of matching hosts; does not execute anything else .RE .PP \fB\-\-private\-key\fR, \fB\-\-key\-file\fR .RS 4 use this file to authenticate the connection .RE .PP \fB\-\-purge\fR .RS 4 purge checkout after playbook run .RE .PP \fB\-\-scp\-extra\-args\fR \fISCP_EXTRA_ARGS\fR .RS 4 specify extra arguments to pass to scp only (e\&.g\&. \-l) .RE .PP \fB\-\-sftp\-extra\-args\fR \fISFTP_EXTRA_ARGS\fR .RS 4 specify extra arguments to pass to sftp only (e\&.g\&.
\-f, \-l) .RE .PP \fB\-\-skip\-tags\fR .RS 4 only run plays and tasks whose tags do not match these values .RE .PP \fB\-\-ssh\-common\-args\fR \fISSH_COMMON_ARGS\fR .RS 4 specify common arguments to pass to sftp/scp/ssh (e\&.g\&. ProxyCommand) .RE .PP \fB\-\-ssh\-extra\-args\fR \fISSH_EXTRA_ARGS\fR .RS 4 specify extra arguments to pass to ssh only (e\&.g\&. \-R) .RE .PP \fB\-\-track\-subs\fR .RS 4 submodules will track the latest changes\&. This is equivalent to specifying the \-\-remote flag to git submodule update .RE .PP \fB\-\-vault\-id\fR .RS 4 the vault identity to use .RE .PP \fB\-\-vault\-password\-file\fR .RS 4 vault password file .RE .PP \fB\-\-verify\-commit\fR .RS 4 verify GPG signature of checked out commit, if it fails abort running the playbook\&. This needs the corresponding VCS module to support such an operation .RE .PP \fB\-\-version\fR .RS 4 show program\(cqs version number and exit .RE .PP \fB\-C\fR \fICHECKOUT\fR, \fB\-\-checkout\fR \fICHECKOUT\fR .RS 4 branch/tag/commit to checkout\&. Defaults to behavior of repository module\&. .RE .PP \fB\-K\fR, \fB\-\-ask\-become\-pass\fR .RS 4 ask for privilege escalation password .RE .PP \fB\-M\fR, \fB\-\-module\-path\fR .RS 4 prepend colon\-separated path(s) to module library (default=[u\*(Aq/home/jenkins/\&.ansible/plugins/modules\*(Aq, u\*(Aq/usr/share/ansible/plugins/modules\*(Aq]) .RE .PP \fB\-T\fR \fITIMEOUT\fR, \fB\-\-timeout\fR \fITIMEOUT\fR .RS 4 override the connection timeout in seconds (default=10) .RE .PP \fB\-U\fR \fIURL\fR, \fB\-\-url\fR \fIURL\fR .RS 4 URL of the playbook repository .RE .PP \fB\-c\fR \fICONNECTION\fR, \fB\-\-connection\fR \fICONNECTION\fR .RS 4 connection type to use (default=smart) .RE .PP \fB\-d\fR \fIDEST\fR, \fB\-\-directory\fR \fIDEST\fR .RS 4 directory to checkout repository to .RE .PP \fB\-e\fR, \fB\-\-extra\-vars\fR .RS 4 set additional variables as key=value or YAML/JSON, if filename prepend with @ .RE .PP \fB\-f\fR, \fB\-\-force\fR .RS 4 run the playbook even if the repository could not be updated .RE .PP \fB\-h\fR, \fB\-\-help\fR .RS 4 show this help message and exit .RE .PP \fB\-i\fR, \fB\-\-inventory\fR, \fB\-\-inventory\-file\fR .RS 4 specify inventory host path or comma separated host list\&. \-\-inventory\-file is deprecated .RE .PP \fB\-k\fR, \fB\-\-ask\-pass\fR .RS 4 ask for connection password .RE .PP \fB\-l\fR \fISUBSET\fR, \fB\-\-limit\fR \fISUBSET\fR .RS 4 further limit selected hosts to an additional pattern .RE .PP \fB\-m\fR \fIMODULE_NAME\fR, \fB\-\-module\-name\fR \fIMODULE_NAME\fR .RS 4 Repository module name, which ansible will use to check out the repo\&. Choices are (\fIgit\fR, \fIsubversion\fR, \fIhg\fR, \fIbzr\fR)\&. Default is git\&. .RE .PP \fB\-o\fR, \fB\-\-only\-if\-changed\fR .RS 4 only run the playbook if the repository has been updated .RE .PP \fB\-s\fR \fISLEEP\fR, \fB\-\-sleep\fR \fISLEEP\fR .RS 4 sleep for random interval (between 0 and n number of seconds) before starting\&. This is a useful way to disperse git requests .RE .PP \fB\-t\fR, \fB\-\-tags\fR .RS 4 only run plays and tasks tagged with these values .RE .PP \fB\-u\fR \fIREMOTE_USER\fR, \fB\-\-user\fR \fIREMOTE_USER\fR .RS 4 connect as this user (default=None) .RE .PP \fB\-v\fR, \fB\-\-verbose\fR .RS 4 verbose mode (\-vvv for more, \-vvvv to enable connection debugging) .RE .SH "ENVIRONMENT" .sp The following environment variables may be specified\&. 
.sp ANSIBLE_CONFIG \(em Override the default ansible config file .sp Many more are available for most options in ansible\&.cfg .SH "FILES" .sp /etc/ansible/ansible\&.cfg \(em Config file, used if present .sp ~/\&.ansible\&.cfg \(em User config file, overrides the default config if present .SH "AUTHOR" .sp Ansible was originally written by Michael DeHaan\&. .SH "COPYRIGHT" .sp Copyright \(co 2017 Red Hat, Inc | Ansible\&. Ansible is released under the terms of the GPLv3 License\&. .SH "SEE ALSO" .sp \fBansible\fR(1), \fBansible\-config\fR(1), \fBansible\-console\fR(1), \fBansible\-doc\fR(1), \fBansible\-galaxy\fR(1), \fBansible\-inventory\fR(1), \fBansible\-playbook\fR(1), \fBansible\-vault\fR(1) .sp Extensive documentation is available in the documentation site: http://docs\&.ansible\&.com\&. IRC and mailing list info can be found in file CONTRIBUTING\&.md, available in: https://github\&.com/ansible/ansible ansible-2.5.1/docs/man/man1/ansible-pull.1.asciidoc.in0000644000000000000000000001252013265756165022304 0ustar rootroot00000000000000ansible-pull(1) =============== :doctype: manpage :encoding: utf-8 :lang: en :man source: Ansible :man version: %VERSION% :man manual: System administration commands NAME ---- ansible-pull - pulls playbooks from a VCS repo and executes them for the local host SYNOPSIS -------- ansible-pull -U <repository> [options] [<playbook.yml>] DESCRIPTION ----------- is used to set up a remote copy of ansible on each managed node, each set to run via cron and update playbook source via a source repository. This inverts the default *push* architecture of ansible into a *pull* architecture, which has near-limitless scaling potential. The setup playbook can be tuned to change the cron frequency, logging locations, and parameters to ansible-pull. This is useful both for extreme scale-out as well as periodic remediation. Usage of the 'fetch' module to retrieve logs from ansible-pull runs would be an excellent way to gather and analyze remote logs from ansible-pull. COMMON OPTIONS -------------- *--accept-host-key*:: adds the hostkey for the repo url if not already added *--ask-su-pass*:: ask for su password (deprecated, use become) *--ask-sudo-pass*:: ask for sudo password (deprecated, use become) *--ask-vault-pass*:: ask for vault password *--check*:: don't make any changes; instead, try to predict some of the changes that may occur *--clean*:: modified files in the working repository will be discarded *--full*:: Do a full clone, instead of a shallow one. *--list-hosts*:: outputs a list of matching hosts; does not execute anything else *--private-key*, *--key-file*:: use this file to authenticate the connection *--purge*:: purge checkout after playbook run *--scp-extra-args* 'SCP_EXTRA_ARGS':: specify extra arguments to pass to scp only (e.g. -l) *--sftp-extra-args* 'SFTP_EXTRA_ARGS':: specify extra arguments to pass to sftp only (e.g. -f, -l) *--skip-tags*:: only run plays and tasks whose tags do not match these values *--ssh-common-args* 'SSH_COMMON_ARGS':: specify common arguments to pass to sftp/scp/ssh (e.g. ProxyCommand) *--ssh-extra-args* 'SSH_EXTRA_ARGS':: specify extra arguments to pass to ssh only (e.g. -R) *--track-subs*:: submodules will track the latest changes. This is equivalent to specifying the --remote flag to git submodule update *--vault-id*:: the vault identity to use *--vault-password-file*:: vault password file *--verify-commit*:: verify GPG signature of checked out commit, if it fails abort running the playbook.
This needs the corresponding VCS module to support such an operation *--version*:: show program's version number and exit *-C* 'CHECKOUT', *--checkout* 'CHECKOUT':: branch/tag/commit to checkout. Defaults to behavior of repository module. *-K*, *--ask-become-pass*:: ask for privilege escalation password *-M*, *--module-path*:: prepend colon-separated path(s) to module library (default=[u'/home/jenkins/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules']) *-T* 'TIMEOUT', *--timeout* 'TIMEOUT':: override the connection timeout in seconds (default=10) *-U* 'URL', *--url* 'URL':: URL of the playbook repository *-c* 'CONNECTION', *--connection* 'CONNECTION':: connection type to use (default=smart) *-d* 'DEST', *--directory* 'DEST':: directory to checkout repository to *-e*, *--extra-vars*:: set additional variables as key=value or YAML/JSON, if filename prepend with @ *-f*, *--force*:: run the playbook even if the repository could not be updated *-h*, *--help*:: show this help message and exit *-i*, *--inventory*, *--inventory-file*:: specify inventory host path or comma separated host list. --inventory-file is deprecated *-k*, *--ask-pass*:: ask for connection password *-l* 'SUBSET', *--limit* 'SUBSET':: further limit selected hosts to an additional pattern *-m* 'MODULE_NAME', *--module-name* 'MODULE_NAME':: Repository module name, which ansible will use to check out the repo. Choices are ('git', 'subversion', 'hg', 'bzr'). Default is git. *-o*, *--only-if-changed*:: only run the playbook if the repository has been updated *-s* 'SLEEP', *--sleep* 'SLEEP':: sleep for random interval (between 0 and n number of seconds) before starting. This is a useful way to disperse git requests *-t*, *--tags*:: only run plays and tasks tagged with these values *-u* 'REMOTE_USER', *--user* 'REMOTE_USER':: connect as this user (default=None) *-v*, *--verbose*:: verbose mode (-vvv for more, -vvvv to enable connection debugging) ENVIRONMENT ----------- The following environment variables may be specified. ANSIBLE_CONFIG -- Override the default ansible config file Many more are available for most options in ansible.cfg FILES ----- /etc/ansible/ansible.cfg -- Config file, used if present ~/.ansible.cfg -- User config file, overrides the default config if present AUTHOR ------ Ansible was originally written by Michael DeHaan. COPYRIGHT --------- Copyright © 2017 Red Hat, Inc | Ansible. Ansible is released under the terms of the GPLv3 License. SEE ALSO -------- *ansible*(1), *ansible-config*(1), *ansible-console*(1), *ansible-doc*(1), *ansible-galaxy*(1), *ansible-inventory*(1), *ansible-playbook*(1), *ansible-vault*(1) Extensive documentation is available in the documentation site: <https://docs.ansible.com>.
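For illustration only (not part of the original page; the repository URL and
playbook name are placeholders), a cron-driven node might run:

    # fetch the repo and apply local.yml from it
    ansible-pull -U https://example.com/playbooks.git local.yml
    # sleep up to 60s first, and only run if the repository changed
    ansible-pull -U https://example.com/playbooks.git -s 60 -o local.yml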
IRC and mailing list info can be found in file CONTRIBUTING.md, available in: <https://github.com/ansible/ansible> ansible-2.5.1/docs/man/man1/ansible-vault.10000644000000000000000000001326313265756166020307 0ustar rootroot00000000000000'\" t .\" Title: ansible-vault .\" Author: [see the "AUTHOR" section] .\" Generator: DocBook XSL Stylesheets v1.78.1 .\" Date: 04/19/2018 .\" Manual: System administration commands .\" Source: Ansible 2.5.1 .\" Language: English .\" .TH "ANSIBLE\-VAULT" "1" "04/19/2018" "Ansible 2\&.5\&.1" "System administration commands" .\" ----------------------------------------------------------------- .\" * Define some portability stuff .\" ----------------------------------------------------------------- .\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .\" http://bugs.debian.org/507673 .\" http://lists.gnu.org/archive/html/groff/2009-02/msg00013.html .\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .ie \n(.g .ds Aq \(aq .el .ds Aq ' .\" ----------------------------------------------------------------- .\" * set default formatting .\" ----------------------------------------------------------------- .\" disable hyphenation .nh .\" disable justification (adjust text to left margin only) .ad l .\" ----------------------------------------------------------------- .\" * MAIN CONTENT STARTS HERE * .\" ----------------------------------------------------------------- .SH "NAME" ansible-vault \- encryption/decryption utility for Ansible data files .SH "SYNOPSIS" .sp ansible\-vault [create|decrypt|edit|encrypt|encrypt_string|rekey|view] [options] [vaultfile\&.yml] .SH "DESCRIPTION" .sp can encrypt any structured data file used by Ansible\&. This can include \fBgroup_vars/\fR or \fBhost_vars/\fR inventory variables, variables loaded by \fBinclude_vars\fR or \fBvars_files\fR, or variable files passed on the ansible\-playbook command line with \fB\-e @file\&.yml\fR or \fB\-e @file\&.json\fR\&. Role variables and defaults are also included! .sp Because Ansible tasks, handlers, and other objects are data, these can also be encrypted with vault\&. If you\(cqd like to not expose what variables you are using, you can keep an individual task file entirely encrypted\&. .sp The password used with vault currently must be the same for all files you wish to use together at the same time\&.
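.SH "EXAMPLES"
.sp
Illustrative invocations, not part of the original page; secrets\&.yml is a placeholder file name\&.
.sp
.nf
# create a new encrypted file in $EDITOR
ansible-vault create secrets.yml
# encrypt an existing variables file in place
ansible-vault encrypt group_vars/all/secrets.yml
# change the password on an already-vaulted file
ansible-vault rekey secrets.yml
# read the contents without leaving a decrypted copy on disk
ansible-vault view secrets.yml
.fi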
.SH "COMMON OPTIONS" .PP \fB\-\-ask\-vault\-pass\fR .RS 4 ask for vault password .RE .PP \fB\-\-new\-vault\-id\fR \fINEW_VAULT_ID\fR .RS 4 the new vault identity to use for rekey .RE .PP \fB\-\-new\-vault\-password\-file\fR .RS 4 new vault password file for rekey .RE .PP \fB\-\-vault\-id\fR .RS 4 the vault identity to use .RE .PP \fB\-\-vault\-password\-file\fR .RS 4 vault password file .RE .PP \fB\-\-version\fR .RS 4 show program\(cqs version number and exit .RE .PP \fB\-h\fR, \fB\-\-help\fR .RS 4 show this help message and exit .RE .PP \fB\-v\fR, \fB\-\-verbose\fR .RS 4 verbose mode (\-vvv for more, \-vvvv to enable connection debugging) .RE .SH "ACTIONS" .PP \fBencrypt\fR .RS 4 encrypt the supplied file using the provided vault secret .PP \fB\-\-encrypt\-vault\-id\fR \fIENCRYPT_VAULT_ID\fR .RS 4 the vault id used to encrypt (required if more than one vault\-id is provided) .RE .PP \fB\-\-output\fR .RS 4 output file name for encrypt or decrypt; use \- for stdout .RE .RE .PP \fBrekey\fR .RS 4 re\-encrypt a vaulted file with a new secret, the previous secret is required .PP \fB\-\-encrypt\-vault\-id\fR \fIENCRYPT_VAULT_ID\fR .RS 4 the vault id used to encrypt (required if more than one vault\-id is provided) .RE .RE .PP \fBencrypt_string\fR .RS 4 encrypt the supplied string using the provided vault secret .PP \fB\-\-encrypt\-vault\-id\fR \fIENCRYPT_VAULT_ID\fR .RS 4 the vault id used to encrypt (required if more than one vault\-id is provided) .RE .PP \fB\-\-output\fR .RS 4 output file name for encrypt or decrypt; use \- for stdout .RE .PP \fB\-\-stdin\-name\fR \fIENCRYPT_STRING_STDIN_NAME\fR .RS 4 Specify the variable name for stdin .RE .PP \fB\-n\fR, \fB\-\-name\fR .RS 4 Specify the variable name .RE .PP \fB\-p\fR, \fB\-\-prompt\fR .RS 4 Prompt for the string to encrypt .RE .RE .PP \fBedit\fR .RS 4 open and decrypt an existing vaulted file in an editor; it will be encrypted again when closed .PP \fB\-\-encrypt\-vault\-id\fR \fIENCRYPT_VAULT_ID\fR .RS 4 the vault id used to encrypt (required if more than one vault\-id is provided) .RE .RE .PP \fBcreate\fR .RS 4 create and open a file in an editor; it will be encrypted with the provided vault secret when closed .PP \fB\-\-encrypt\-vault\-id\fR \fIENCRYPT_VAULT_ID\fR .RS 4 the vault id used to encrypt (required if more than one vault\-id is provided) .RE .RE .PP \fBdecrypt\fR .RS 4 decrypt the supplied file using the provided vault secret .PP \fB\-\-output\fR .RS 4 output file name for encrypt or decrypt; use \- for stdout .RE .RE .PP \fBview\fR .RS 4 open, decrypt and view an existing vaulted file in a pager, using the supplied vault secret .RE .SH "ENVIRONMENT" .sp The following environment variables may be specified\&. .sp ANSIBLE_CONFIG \(em Override the default ansible config file .sp Many more are available for most options in ansible\&.cfg .SH "FILES" .sp /etc/ansible/ansible\&.cfg \(em Config file, used if present .sp ~/\&.ansible\&.cfg \(em User config file, overrides the default config if present .SH "AUTHOR" .sp Ansible was originally written by Michael DeHaan\&. .SH "COPYRIGHT" .sp Copyright \(co 2017 Red Hat, Inc | Ansible\&. Ansible is released under the terms of the GPLv3 License\&. .SH "SEE ALSO" .sp \fBansible\fR(1), \fBansible\-config\fR(1), \fBansible\-console\fR(1), \fBansible\-doc\fR(1), \fBansible\-galaxy\fR(1), \fBansible\-inventory\fR(1), \fBansible\-playbook\fR(1), \fBansible\-pull\fR(1) .sp Extensive documentation is available in the documentation site: http://docs\&.ansible\&.com\&.
IRC and mailing list info can be found in file CONTRIBUTING\&.md, available in: https://github\&.com/ansible/ansible ansible-2.5.1/docs/man/man1/ansible-vault.1.asciidoc.in0000644000000000000000000001031513265756165022463 0ustar rootroot00000000000000ansible-vault(1) ================ :doctype: manpage :encoding: utf-8 :lang: en :man source: Ansible :man version: %VERSION% :man manual: System administration commands NAME ---- ansible-vault - encryption/decryption utility for Ansible data files SYNOPSIS -------- ansible-vault [create|decrypt|edit|encrypt|encrypt_string|rekey|view] [options] [vaultfile.yml] DESCRIPTION ----------- *ansible-vault* can encrypt any structured data file used by Ansible. This can include *group_vars/* or *host_vars/* inventory variables, variables loaded by *include_vars* or *vars_files*, or variable files passed on the ansible-playbook command line with *-e @file.yml* or *-e @file.json*. Role variables and defaults are also included! Because Ansible tasks, handlers, and other objects are data, these can also be encrypted with vault. If you'd like not to expose what variables you are using, you can keep an individual task file entirely encrypted. The password used with vault currently must be the same for all files you wish to use together at the same time. COMMON OPTIONS -------------- *--ask-vault-pass*:: ask for vault password *--new-vault-id* 'NEW_VAULT_ID':: the new vault identity to use for rekey *--new-vault-password-file*:: new vault password file for rekey *--vault-id*:: the vault identity to use *--vault-password-file*:: vault password file *--version*:: show program's version number and exit *-h*, *--help*:: show this help message and exit *-v*, *--verbose*:: verbose mode (-vvv for more, -vvvv to enable connection debugging) ACTIONS ------- *encrypt*::: encrypt the supplied file using the provided vault secret *--encrypt-vault-id* 'ENCRYPT_VAULT_ID':: the vault id used to encrypt (required if more than one vault-id is provided) *--output*:: output file name for encrypt or decrypt; use - for stdout *rekey*::: re-encrypt a vaulted file with a new secret; the previous secret is required *--encrypt-vault-id* 'ENCRYPT_VAULT_ID':: the vault id used to encrypt (required if more than one vault-id is provided) *encrypt_string*::: encrypt the supplied string using the provided vault secret *--encrypt-vault-id* 'ENCRYPT_VAULT_ID':: the vault id used to encrypt (required if more than one vault-id is provided) *--output*:: output file name for encrypt or decrypt; use - for stdout *--stdin-name* 'ENCRYPT_STRING_STDIN_NAME':: Specify the variable name for stdin *-n*, *--name*:: Specify the variable name *-p*, *--prompt*:: Prompt for the string to encrypt *edit*::: open and decrypt an existing vaulted file in an editor; it will be encrypted again when closed *--encrypt-vault-id* 'ENCRYPT_VAULT_ID':: the vault id used to encrypt (required if more than one vault-id is provided) *create*::: create and open a file in an editor that will be encrypted with the provided vault secret when closed *--encrypt-vault-id* 'ENCRYPT_VAULT_ID':: the vault id used to encrypt (required if more than one vault-id is provided) *decrypt*::: decrypt the supplied file using the provided vault secret *--output*:: output file name for encrypt or decrypt; use - for stdout *view*::: open, decrypt and view an existing vaulted file in a pager, using the supplied vault secret ENVIRONMENT ----------- The following environment variables may be specified.
ANSIBLE_CONFIG -- Override the default ansible config file Many more are available for most options in ansible.cfg FILES ----- /etc/ansible/ansible.cfg -- Config file, used if present ~/.ansible.cfg -- User config file, overrides the default config if present AUTHOR ------ Ansible was originally written by Michael DeHaan. COPYRIGHT --------- Copyright © 2017 Red Hat, Inc | Ansible. Ansible is released under the terms of the GPLv3 License. SEE ALSO -------- *ansible*(1), *ansible-config*(1), *ansible-console*(1), *ansible-doc*(1), *ansible-galaxy*(1), *ansible-inventory*(1), *ansible-playbook*(1), *ansible-pull*(1), Extensive documentation is available in the documentation site: http://docs.ansible.com. IRC and mailing list info can be found in file CONTRIBUTING.md, available in: https://github.com/ansible/ansible ansible-2.5.1/docs/man/man1/ansible.10000644000000000000000000001620613265756170017151 0ustar rootroot00000000000000'\" t .\" Title: ansible .\" Author: [see the "AUTHOR" section] .\" Generator: DocBook XSL Stylesheets v1.78.1 .\" Date: 04/19/2018 .\" Manual: System administration commands .\" Source: Ansible 2.5.1 .\" Language: English .\" .TH "ANSIBLE" "1" "04/19/2018" "Ansible 2\&.5\&.1" "System administration commands" .\" ----------------------------------------------------------------- .\" * Define some portability stuff .\" ----------------------------------------------------------------- .\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .\" http://bugs.debian.org/507673 .\" http://lists.gnu.org/archive/html/groff/2009-02/msg00013.html .\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .ie \n(.g .ds Aq \(aq .el .ds Aq ' .\" ----------------------------------------------------------------- .\" * set default formatting .\" ----------------------------------------------------------------- .\" disable hyphenation .nh .\" disable justification (adjust text to left margin only) .ad l .\" ----------------------------------------------------------------- .\" * MAIN CONTENT STARTS HERE * .\" ----------------------------------------------------------------- .SH "NAME" ansible \- Define and run a single task \*(Aqplaybook\*(Aq against a set of hosts .SH "SYNOPSIS" .sp ansible [options] .SH "DESCRIPTION" .sp ansible is an extra\-simple tool/framework/API for doing \fIremote things\fR\&. This command allows you to define and run a single task \fIplaybook\fR against a set of hosts\&. .SH "COMMON OPTIONS" .PP \fB\-\-ask\-su\-pass\fR .RS 4 ask for su password (deprecated, use become) .RE .PP \fB\-\-ask\-sudo\-pass\fR .RS 4 ask for sudo password (deprecated, use become) .RE .PP \fB\-\-ask\-vault\-pass\fR .RS 4 ask for vault password .RE .PP \fB\-\-become\-method\fR \fIBECOME_METHOD\fR .RS 4 privilege escalation method to use (default=sudo), valid choices: [ sudo | su | pbrun | pfexec | doas | dzdo | ksu | runas | pmrun | enable ] .RE .PP \fB\-\-become\-user\fR \fIBECOME_USER\fR .RS 4 run operations as this user (default=root) .RE .PP \fB\-\-list\-hosts\fR .RS 4 outputs a list of matching hosts; does not execute anything else .RE .PP \fB\-\-playbook\-dir\fR \fIBASEDIR\fR .RS 4 Since this tool does not use playbooks, use this as a substitute playbook directory\&. This sets the relative path for many features including roles/ group_vars/ etc\&. .RE .PP \fB\-\-private\-key\fR, \fB\-\-key\-file\fR .RS 4 use this file to authenticate the connection .RE .PP \fB\-\-scp\-extra\-args\fR \fISCP_EXTRA_ARGS\fR .RS 4 specify extra arguments to pass to scp only (e\&.g\&.
\-l) .RE .PP \fB\-\-sftp\-extra\-args\fR \fISFTP_EXTRA_ARGS\fR .RS 4 specify extra arguments to pass to sftp only (e\&.g\&. \-f, \-l) .RE .PP \fB\-\-ssh\-common\-args\fR \fISSH_COMMON_ARGS\fR .RS 4 specify common arguments to pass to sftp/scp/ssh (e\&.g\&. ProxyCommand) .RE .PP \fB\-\-ssh\-extra\-args\fR \fISSH_EXTRA_ARGS\fR .RS 4 specify extra arguments to pass to ssh only (e\&.g\&. \-R) .RE .PP \fB\-\-syntax\-check\fR .RS 4 perform a syntax check on the playbook, but do not execute it .RE .PP \fB\-\-vault\-id\fR .RS 4 the vault identity to use .RE .PP \fB\-\-vault\-password\-file\fR .RS 4 vault password file .RE .PP \fB\-\-version\fR .RS 4 show program\(cqs version number and exit .RE .PP \fB\-B\fR \fISECONDS\fR, \fB\-\-background\fR \fISECONDS\fR .RS 4 run asynchronously, failing after X seconds (default=N/A) .RE .PP \fB\-C\fR, \fB\-\-check\fR .RS 4 don\(cqt make any changes; instead, try to predict some of the changes that may occur .RE .PP \fB\-D\fR, \fB\-\-diff\fR .RS 4 when changing (small) files and templates, show the differences in those files; works great with \-\-check .RE .PP \fB\-K\fR, \fB\-\-ask\-become\-pass\fR .RS 4 ask for privilege escalation password .RE .PP \fB\-M\fR, \fB\-\-module\-path\fR .RS 4 prepend colon\-separated path(s) to module library (default=[u\*(Aq/home/jenkins/\&.ansible/plugins/modules\*(Aq, u\*(Aq/usr/share/ansible/plugins/modules\*(Aq]) .RE .PP \fB\-P\fR \fIPOLL_INTERVAL\fR, \fB\-\-poll\fR \fIPOLL_INTERVAL\fR .RS 4 set the poll interval if using \-B (default=15) .RE .PP \fB\-R\fR \fISU_USER\fR, \fB\-\-su\-user\fR \fISU_USER\fR .RS 4 run operations with su as this user (default=None) (deprecated, use become) .RE .PP \fB\-S\fR, \fB\-\-su\fR .RS 4 run operations with su (deprecated, use become) .RE .PP \fB\-T\fR \fITIMEOUT\fR, \fB\-\-timeout\fR \fITIMEOUT\fR .RS 4 override the connection timeout in seconds (default=10) .RE .PP \fB\-U\fR \fISUDO_USER\fR, \fB\-\-sudo\-user\fR \fISUDO_USER\fR .RS 4 desired sudo user (default=root) (deprecated, use become) .RE .PP \fB\-a\fR \fIMODULE_ARGS\fR, \fB\-\-args\fR \fIMODULE_ARGS\fR .RS 4 module arguments .RE .PP \fB\-b\fR, \fB\-\-become\fR .RS 4 run operations with become (does not imply password prompting) .RE .PP \fB\-c\fR \fICONNECTION\fR, \fB\-\-connection\fR \fICONNECTION\fR .RS 4 connection type to use (default=smart) .RE .PP \fB\-e\fR, \fB\-\-extra\-vars\fR .RS 4 set additional variables as key=value or YAML/JSON, if filename prepend with @ .RE .PP \fB\-f\fR \fIFORKS\fR, \fB\-\-forks\fR \fIFORKS\fR .RS 4 specify number of parallel processes to use (default=5) .RE .PP \fB\-h\fR, \fB\-\-help\fR .RS 4 show this help message and exit .RE .PP \fB\-i\fR, \fB\-\-inventory\fR, \fB\-\-inventory\-file\fR .RS 4 specify inventory host path or comma separated host list\&. 
\-\-inventory\-file is deprecated .RE .PP \fB\-k\fR, \fB\-\-ask\-pass\fR .RS 4 ask for connection password .RE .PP \fB\-l\fR \fISUBSET\fR, \fB\-\-limit\fR \fISUBSET\fR .RS 4 further limit selected hosts to an additional pattern .RE .PP \fB\-m\fR \fIMODULE_NAME\fR, \fB\-\-module\-name\fR \fIMODULE_NAME\fR .RS 4 module name to execute (default=command) .RE .PP \fB\-o\fR, \fB\-\-one\-line\fR .RS 4 condense output .RE .PP \fB\-s\fR, \fB\-\-sudo\fR .RS 4 run operations with sudo (nopasswd) (deprecated, use become) .RE .PP \fB\-t\fR \fITREE\fR, \fB\-\-tree\fR \fITREE\fR .RS 4 log output to this directory .RE .PP \fB\-u\fR \fIREMOTE_USER\fR, \fB\-\-user\fR \fIREMOTE_USER\fR .RS 4 connect as this user (default=None) .RE .PP \fB\-v\fR, \fB\-\-verbose\fR .RS 4 verbose mode (\-vvv for more, \-vvvv to enable connection debugging) .RE .SH "ENVIRONMENT" .sp The following environment variables may be specified\&. .sp ANSIBLE_CONFIG \(em Override the default ansible config file .sp Many more are available for most options in ansible\&.cfg .SH "FILES" .sp /etc/ansible/ansible\&.cfg \(em Config file, used if present .sp ~/\&.ansible\&.cfg \(em User config file, overrides the default config if present .SH "AUTHOR" .sp Ansible was originally written by Michael DeHaan\&. .SH "COPYRIGHT" .sp Copyright \(co 2017 Red Hat, Inc | Ansible\&. Ansible is released under the terms of the GPLv3 License\&. .SH "SEE ALSO" .sp \fBansible\-config\fR(1), \fBansible\-console\fR(1), \fBansible\-doc\fR(1), \fBansible\-galaxy\fR(1), \fBansible\-inventory\fR(1), \fBansible\-playbook\fR(1), \fBansible\-pull\fR(1), \fBansible\-vault\fR(1) .sp Extensive documentation is available in the documentation site: http://docs\&.ansible\&.com\&. IRC and mailing list info can be found in file CONTRIBUTING\&.md, available in: https://github\&.com/ansible/ansible ansible-2.5.1/docs/man/man1/ansible.1.asciidoc.in0000644000000000000000000001204113265756165021330 0ustar rootroot00000000000000ansible(1) ========== :doctype: manpage :encoding: utf-8 :lang: en :man source: Ansible :man version: %VERSION% :man manual: System administration commands NAME ---- ansible - Define and run a single task 'playbook' against a set of hosts SYNOPSIS -------- ansible [options] DESCRIPTION ----------- ansible is an extra-simple tool/framework/API for doing 'remote things'. This command allows you to define and run a single task 'playbook' against a set of hosts. COMMON OPTIONS -------------- *--ask-su-pass*:: ask for su password (deprecated, use become) *--ask-sudo-pass*:: ask for sudo password (deprecated, use become) *--ask-vault-pass*:: ask for vault password *--become-method* 'BECOME_METHOD':: privilege escalation method to use (default=sudo), valid choices: [ sudo | su | pbrun | pfexec | doas | dzdo | ksu | runas | pmrun | enable ] *--become-user* 'BECOME_USER':: run operations as this user (default=root) *--list-hosts*:: outputs a list of matching hosts; does not execute anything else *--playbook-dir* 'BASEDIR':: Since this tool does not use playbooks, use this as a substitute playbook directory. This sets the relative path for many features including roles/ group_vars/ etc. *--private-key*, *--key-file*:: use this file to authenticate the connection *--scp-extra-args* 'SCP_EXTRA_ARGS':: specify extra arguments to pass to scp only (e.g. -l) *--sftp-extra-args* 'SFTP_EXTRA_ARGS':: specify extra arguments to pass to sftp only (e.g. -f, -l) *--ssh-common-args* 'SSH_COMMON_ARGS':: specify common arguments to pass to sftp/scp/ssh (e.g.
ProxyCommand) *--ssh-extra-args* 'SSH_EXTRA_ARGS':: specify extra arguments to pass to ssh only (e.g. -R) *--syntax-check*:: perform a syntax check on the playbook, but do not execute it *--vault-id*:: the vault identity to use *--vault-password-file*:: vault password file *--version*:: show program's version number and exit *-B* 'SECONDS', *--background* 'SECONDS':: run asynchronously, failing after X seconds (default=N/A) *-C*, *--check*:: don't make any changes; instead, try to predict some of the changes that may occur *-D*, *--diff*:: when changing (small) files and templates, show the differences in those files; works great with --check *-K*, *--ask-become-pass*:: ask for privilege escalation password *-M*, *--module-path*:: prepend colon-separated path(s) to module library (default=[u'/home/jenkins/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules']) *-P* 'POLL_INTERVAL', *--poll* 'POLL_INTERVAL':: set the poll interval if using -B (default=15) *-R* 'SU_USER', *--su-user* 'SU_USER':: run operations with su as this user (default=None) (deprecated, use become) *-S*, *--su*:: run operations with su (deprecated, use become) *-T* 'TIMEOUT', *--timeout* 'TIMEOUT':: override the connection timeout in seconds (default=10) *-U* 'SUDO_USER', *--sudo-user* 'SUDO_USER':: desired sudo user (default=root) (deprecated, use become) *-a* 'MODULE_ARGS', *--args* 'MODULE_ARGS':: module arguments *-b*, *--become*:: run operations with become (does not imply password prompting) *-c* 'CONNECTION', *--connection* 'CONNECTION':: connection type to use (default=smart) *-e*, *--extra-vars*:: set additional variables as key=value or YAML/JSON, if filename prepend with @ *-f* 'FORKS', *--forks* 'FORKS':: specify number of parallel processes to use (default=5) *-h*, *--help*:: show this help message and exit *-i*, *--inventory*, *--inventory-file*:: specify inventory host path or comma separated host list. --inventory-file is deprecated *-k*, *--ask-pass*:: ask for connection password *-l* 'SUBSET', *--limit* 'SUBSET':: further limit selected hosts to an additional pattern *-m* 'MODULE_NAME', *--module-name* 'MODULE_NAME':: module name to execute (default=command) *-o*, *--one-line*:: condense output *-s*, *--sudo*:: run operations with sudo (nopasswd) (deprecated, use become) *-t* 'TREE', *--tree* 'TREE':: log output to this directory *-u* 'REMOTE_USER', *--user* 'REMOTE_USER':: connect as this user (default=None) *-v*, *--verbose*:: verbose mode (-vvv for more, -vvvv to enable connection debugging) ENVIRONMENT ----------- The following environment variables may be specified. ANSIBLE_CONFIG -- Override the default ansible config file Many more are available for most options in ansible.cfg FILES ----- /etc/ansible/ansible.cfg -- Config file, used if present ~/.ansible.cfg -- User config file, overrides the default config if present AUTHOR ------ Ansible was originally written by Michael DeHaan. COPYRIGHT --------- Copyright © 2017 Red Hat, Inc | Ansible. Ansible is released under the terms of the GPLv3 License. SEE ALSO -------- *ansible-config*(1), *ansible-console*(1), *ansible-doc*(1), *ansible-galaxy*(1), *ansible-inventory*(1), *ansible-playbook*(1), *ansible-pull*(1), *ansible-vault*(1) Extensive documentation is available in the documentation site: . 
IRC and mailing list info can be found in file CONTRIBUTING.md, available in: ansible-2.5.1/docs/man/man3/0000755000000000000000000000000013265756221015444 5ustar rootroot00000000000000ansible-2.5.1/docs/man/man3/.gitdir0000644000000000000000000000000013265756155016723 0ustar rootroot00000000000000ansible-2.5.1/docs/man/.gitignore0000644000000000000000000000002113265756155016575 0ustar rootroot00000000000000*.xml *.asciidoc ansible-2.5.1/docs/templates/0000755000000000000000000000000013265756221016031 5ustar rootroot00000000000000ansible-2.5.1/docs/templates/cli_rst.j20000644000000000000000000000503713265756155017740 0ustar rootroot00000000000000{% set name = cli_name -%} {% set name_slug = cli_name -%} .. _{{name}}: {% set name_len = name|length + 0-%} {{ '=' * name_len }} {{name}} {{ '=' * name_len }} :strong:`{{short_desc|default('')}}` .. contents:: :local: :depth: 2 .. program:: {{cli_name}} Synopsis ======== .. code-block:: bash {{ usage|replace('%prog', cli_name) }} Description =========== {{ long_desc|default('', True) }} {% if options %} Common Options ============== {% for option in options|sort(attribute='options') %} .. option:: {% for switch in option['options'] %}{{switch}}{% if option['arg'] %} <{{option['arg']}}>{% endif %}{% if not loop.last %}, {% endif %}{% endfor %} {{ option['desc'] }} {% endfor %} {% endif %} {% if arguments %} ARGUMENTS ========= .. program:: {{cli_name}} {% for arg in arguments %} .. option:: {{ arg }} {{ (arguments[arg]|default(' '))}} {% endfor %} {% endif %} {% if actions %} Actions ======= {% for action in actions %} .. program:: {{cli_name}} {{action}} .. _{{cli_name|replace('-','_')}}_{{action}}: {{ action}} {{ '-' * action|length}} {{ (actions[action]['desc']|default(' '))}} {% if actions[action]['options'] %} {% for option in actions[action]['options']|sort(attribute='options') %} .. option:: {% for switch in option['options'] if switch in actions[action]['option_names'] %}{{switch}} {% if option['arg'] %} <{{option['arg']}}>{% endif %}{% if not loop.last %}, {% endif %}{% endfor %} {{ (option['desc']) }} {% endfor %} {% endif %} {% endfor %} .. program:: {{cli_name}} {% endif %} Environment =========== The following environment variables may be specified. {% if inventory %} :envvar:`ANSIBLE_INVENTORY` -- Override the default ansible inventory file {% endif %} {% if library %} :envvar:`ANSIBLE_LIBRARY` -- Override the default ansible module library path {% endif %} :envvar:`ANSIBLE_CONFIG` -- Override the default ansible config file Many more are available for most options in ansible.cfg Files ===== {% if inventory %} :file:`/etc/ansible/hosts` -- Default inventory file {% endif %} :file:`/etc/ansible/ansible.cfg` -- Config file, used if present :file:`~/.ansible.cfg` -- User config file, overrides the default config if present Author ====== Ansible was originally written by Michael DeHaan. See the `AUTHORS` file for a complete list of contributors. Copyright ========= Copyright © 2017 Red Hat, Inc | Ansible. Ansible is released under the terms of the GPLv3 License. See also ======== {% for other in cli_bin_name_list|sort %}:manpage:`{{other}}(1)`, {% endfor %} ansible-2.5.1/docs/templates/config.rst.j20000644000000000000000000000663513265756155020362 0ustar rootroot00000000000000.. 
_ansible_configuration_settings: {% set name = 'Ansible Configuration Settings' -%} {% set name_slug = 'config' -%} {% set name_len = name|length + 0-%} {{ '=' * name_len }} {{name}} {{ '=' * name_len }} Ansible supports a few ways of providing configuration variables, mainly through environment variables, command line switches and an ini file named ``ansible.cfg``. Starting at Ansible 2.4 the ``ansible-config`` utility allows users to see all the configuration settings available, their defaults, how to set them and where their current value comes from. See :doc:ansible-config for more information. The configuration file ====================== Changes can be made and used in a configuration file which will be searched for in the following order: * ``ANSIBLE_CONFIG`` (environment variable if set) * ``ansible.cfg`` (in the current directory) * ``~/.ansible.cfg`` (in the home directory) * ``/etc/ansible/ansible.cfg`` Ansible will process the above list and use the first file found, all others are ignored. .. note:: The configuration file is one variant of an INI format. Both the hash sign (``#``) and semicolon (``;``) are allowed as comment markers when the comment starts the line. However, if the comment is inline with regular values, only the semicolon is allowed to introduce the comment. For instance:: # some basic default values... inventory = /etc/ansible/hosts ; This points to the file that lists your hosts Common Options ============== This is a copy of the options available from our release, your local install might have extra options due to additional plugins, you can use the command line utility mentioned above (`ansible-config`) to browse through those. {% if config_options %} {% for config_option in config_options|sort %} {% set config_len = config_option|length -%} {% set config = config_options[config_option] %} .. _{{config_option}}: {{config_option}} {{ '-' * config_len }} {% if config['description'] and config['description'] != [''] %} {% if config['description'] != ['TODO: write it'] %} :Description: {{' '.join(config['description'])}} {% endif %} {% endif %} {% if config['type'] %} :Type: {{config['type']}} {% endif %} :Default: {{config['default']}} {% if config['version_added'] %} :Version Added: {{config['version_added']}} {% endif %} {% for ini_map in config['ini']|sort(attribute='section') %} :Ini Section: {{ini_map['section']}} :Ini Key: {{ini_map['key']}} {% endfor %} {% for env_var_map in config['env']|sort(attribute='name') %} :Environment: :envvar:`{{env_var_map['name']}}` {% endfor %} {% if config['deprecated'] %} :Deprecated in: {{config['deprecated']['version']}} :Deprecated detail: {{config['deprecated']['why']}} :Deprecated alternatives: {{config['deprecated']['alternatives']}} {% endif %} {% endfor %} Environment Variables ===================== .. envvar:: ANSIBLE_CONFIG Override the default ansible config file {% for config_option in config_options %} {% for env_var_map in config_options[config_option]['env'] %} .. envvar:: {{env_var_map['name']}} {% if config_options[config_option]['description'] and config_options[config_option]['description'] != [''] %} {% if config_options[config_option]['description'] != ['TODO: write it'] %} {{ ''.join(config_options[config_option]['description']) }} {% endif %} {% endif %} See also :ref:`{{config_option}} <{{config_option}}>` {% endfor %} {% endfor %} {% endif %} ansible-2.5.1/docs/templates/list_of_CATEGORY_modules.rst.j20000644000000000000000000000166613265756155023600 0ustar rootroot00000000000000.. 
_@{ title.lower() + '_' + plugin_type + 's' }@: @{ title }@ @{ plugin_type + 's' }@ @{ '`' * title | length }@```````` {% if blurb %} @{ blurb }@ {% endif %} {% if category['_modules'] %} {% for module in category['_modules'] | sort %} * :ref:`@{ module }@_@{ plugin_type }@`{% if module_info[module]['deprecated'] %} **(D)**{% endif%} {% endfor %} {% endif %} {% for name, info in subcategories.items() | sort %} .. _@{ name.lower() + '_' + title.lower() + '_' + plugin_type + 's' }@: @{ name.title() }@ @{ '-' * name | length }@ {% for module in info['_modules'] | sort %} * :ref:`@{ module }@_@{ plugin_type }@`{% if module_info[module]['deprecated'] %} **(D)**{% endif%} {% endfor %} {% endfor %} .. note:: - **(D)**: This marks a module as deprecated, which means a module is kept for backwards compatibility but usage is discouraged. The module documentation details page may explain more about this rationale. ansible-2.5.1/docs/templates/list_of_CATEGORY_plugins.rst.j20000644000000000000000000000227513265756155023606 0ustar rootroot00000000000000.. _@{ title.lower() + '_' + plugin_type + 's' }@: @{ title }@ @{ plugin_type }@ @{ '`' * title | length }@```````` {% if blurb %} @{ blurb }@ {% endif %} .. toctree:: :maxdepth: 1 {% if category['_modules'] %} {% for module in category['_modules'] | sort %} @{ module }@{% if module_info[module]['deprecated'] %} **(D)**{% endif%}{% if module_info[module]['doc']['short_description'] %} -- @{ module_info[module]['doc']['short_description'] }@{% endif %} {% endfor %} {% endif %} {% for name, info in subcategories.items() | sort %} .. _@{ name.lower() + '_' + title.lower() + '_' + plugin_type + 's' }@: @{ name.title() }@ @{ '-' * name | length }@ .. toctree:: :maxdepth: 1 {% for module in info['_modules'] | sort %} :ref:`@{ module }@_@{ plugin_type }@`{% if module_info[module]['deprecated'] %} **(D)**{% endif%} -- @{ module_info[module]['doc']['short_description'] }@ {% endfor %} {% endfor %} .. note:: - **(D)**: This marks a module as deprecated, which means a module is kept for backwards compatibility but usage is discouraged. The module documentation details page may explain more about this rationale. 
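Note that the templates above substitute variables with @{ ... }@ rather than Jinja2's default {{ ... }}, so literal brace pairs can pass through into the rendered RST untouched. Below is a minimal sketch of rendering one of them — the Environment keywords are standard Jinja2, while the render() arguments are illustrative assumptions rather than the real docs-build inputs:

.. code-block:: python

    # Sketch: render list_of_CATEGORY_modules.rst.j2 with the custom
    # variable delimiters these templates rely on.
    from jinja2 import Environment, FileSystemLoader

    env = Environment(
        loader=FileSystemLoader('docs/templates'),
        variable_start_string='@{',
        variable_end_string='}@',
        trim_blocks=True,
    )
    template = env.get_template('list_of_CATEGORY_modules.rst.j2')
    print(template.render(
        title='Files',  # illustrative inputs, not the actual build data
        plugin_type='module',
        blurb=None,
        category={'_modules': ['copy', 'file']},
        subcategories={},
        module_info={'copy': {'deprecated': False},
                     'file': {'deprecated': False}},
    ))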
ansible-2.5.1/docs/templates/man.j20000644000000000000000000000560513265756155017055 0ustar rootroot00000000000000{% set name = ('ansible' if cli == 'adhoc' else 'ansible-%s' % cli) -%} {{name}}(1) {{ '=' * ((name|length|int) + 3) }} :doctype: manpage :encoding: utf-8 :lang: en :man source: Ansible :man version: %VERSION% :man manual: System administration commands NAME ---- ansible{% if cli != 'adhoc' %}-{{cli}}{% endif %} - {{short_desc|default('')}} SYNOPSIS -------- {{ usage|replace('%prog', name) }} DESCRIPTION ----------- {{ long_desc|default('', True)|wordwrap }} {% if options %} COMMON OPTIONS -------------- {% for option in options|sort(attribute='options') %} {% for switch in option['options'] %}*{{switch}}*{% if option['arg'] %} '{{option['arg']}}'{% endif %}{% if not loop.last %}, {% endif %}{% endfor %}:: {{ option['desc'] }} {% endfor %} {% endif %} {% if arguments %} ARGUMENTS --------- {% for arg in arguments %} {{ arg }} {{ (arguments[arg]|default(' '))|wordwrap }} {% endfor %} {% endif %} {% if actions %} ACTIONS ------- {% for action in actions %} *{{ action }}*::: {{ (actions[action]['desc']|default(' '))|wordwrap}} {% if actions[action]['options'] %} {% for option in actions[action]['options']|sort(attribute='options') %} {% for switch in option['options'] if switch in actions[action]['option_names'] %}*{{switch}}*{% if option['arg'] %} '{{option['arg']}}'{% endif %}{% if not loop.last %}, {% endif %}{% endfor %}:: {{ (option['desc']) }} {% endfor %} {% endif %} {% endfor %} {% endif %} {% if inventory %} INVENTORY --------- Ansible stores the hosts it can potentially operate on in an inventory. This can be an YAML file, ini-like file, a script, directory, list, etc. For additional options, see the documentation on http://docs.ansible.com/. {% endif %} ENVIRONMENT ----------- The following environment variables may be specified. {% if inventory %} ANSIBLE_INVENTORY -- Override the default ansible inventory file {% endif %} {% if library %} ANSIBLE_LIBRARY -- Override the default ansible module library path {% endif %} ANSIBLE_CONFIG -- Override the default ansible config file Many more are available for most options in ansible.cfg FILES ----- {% if inventory %} /etc/ansible/hosts -- Default inventory file {% endif %} /etc/ansible/ansible.cfg -- Config file, used if present ~/.ansible.cfg -- User config file, overrides the default config if present AUTHOR ------ Ansible was originally written by Michael DeHaan. COPYRIGHT --------- Copyright © 2017 Red Hat, Inc | Ansible. Ansible is released under the terms of the GPLv3 License. SEE ALSO -------- {% for other in cli_list|sort %}{% if other != cli %}*ansible{% if other != 'adhoc' %}-{{other}}{% endif %}*(1){% if not loop.last %}, {% endif %}{% endif %}{% endfor %} Extensive documentation is available in the documentation site: . IRC and mailing list info can be found in file CONTRIBUTING.md, available in: ansible-2.5.1/docs/templates/modules_by_category.rst.j20000644000000000000000000000017713265756155023147 0ustar rootroot00000000000000Module Index ============ .. toctree:: :maxdepth: 1 {% for name in categories %} list_of_@{ name }@_modules {% endfor %} ansible-2.5.1/docs/templates/modules_by_support.rst.j20000644000000000000000000000076213265756155023046 0ustar rootroot00000000000000.. 
_@{ slug }@: Modules Maintained by the @{ maintainers }@ ``````````````````````````@{ '`' * maintainers | length }@ {% for module in modules | sort %} * :ref:`@{ module }@_@{plugin_type}@`{% if module_info[module]['deprecated'] %} **(D)**{% endif%} {% endfor %} .. note:: - **(D)**: This marks a module as deprecated, which means a module is kept for backwards compatibility but usage is discouraged. The module documentation details page may explain more about this rationale. ansible-2.5.1/docs/templates/playbooks_keywords.rst.j20000644000000000000000000000133513265756155023037 0ustar rootroot00000000000000Playbook Keywords ================= These are the keywords available on common playbook objects. .. note:: Please note: * Aliases for the directives are not reflected here, nor are mutable ones. For example, :term:`action` in task can be substituted by the name of any Ansible module. * The keywords do not have ``version_added`` information at this time * Some keywords set defaults for the objects inside of them rather than for the objects themselves .. contents:: :local: :depth: 1 {% for name in clist %} {{ name }} {{ '-' * name|length }} .. glossary:: {% for attribute in oblist[name]|sort %} {{ attribute }} {{ oblist[name][attribute] |indent(8) }} {% endfor %} {% endfor %} ansible-2.5.1/docs/templates/plugin.rst.j20000644000000000000000000004035313265756155020406 0ustar rootroot00000000000000:source: @{ source }@ .. _@{ module }@_@{ plugin_type }@: {% for alias in aliases %} .. _@{ alias }@: {% endfor %} {% if short_description %} {% set title = module + ' - ' + short_description|convert_symbols_to_format %} {% else %} {% set title = module %} {% endif %} @{ title }@ @{ '+' * title|length }@ {% if version_added is defined and version_added != '' -%} .. versionadded:: @{ version_added | default('') }@ {% endif %} .. contents:: :local: :depth: 2 {# ------------------------------------------ # # Please note: this looks like a core dump # but it isn't one. # --------------------------------------------#} {% if deprecated is defined -%} DEPRECATED ---------- {# use unknown here? skip the fields? #} :Removed in Ansible: version: @{ deprecated['removed_in'] | default('') | string | convert_symbols_to_format }@ :Why: @{ deprecated['why'] | default('') | convert_symbols_to_format }@ :Alternative: @{ deprecated['alternative'] | default('')| convert_symbols_to_format }@ {% endif %} Synopsis -------- {% if description -%} {% if description is string -%} - @{ description | convert_symbols_to_format }@ {% else %} {% for desc in description %} - @{ desc | convert_symbols_to_format }@ {% endfor %} {% endif %} {% endif %} {% if aliases is defined -%} Aliases: @{ ','.join(aliases) }@ {% endif %} {% if requirements -%} Requirements ~~~~~~~~~~~~ {% if plugin_type == 'module' %} The below requirements are needed on the host that executes this @{ plugin_type }@. {% else %} The below requirements are needed on the local master node that executes this @{ plugin_type }@. {% endif %} {% for req in requirements %} - @{ req | convert_symbols_to_format }@ {% endfor %} {% endif %} {% if options -%} Parameters ---------- ..
raw:: html {# Header of the documentation #} {% if plugin_type != 'module' %} {% endif %} {% for key, value in options|dictsort recursive %} {# parameter name with required and/or introduced label #} {# default / choices #} {# configuration #} {% if plugin_type != 'module' %} {% endif %} {# description #} {% if value.suboptions %} {% if value.suboptions.items %} @{ loop(value.suboptions.items()) }@ {% elif value.suboptions[0].items %} @{ loop(value.suboptions[0].items()) }@ {% endif %} {% endif %} {% endfor %}
Parameter
Choices/Defaults
Configuration
Comments
{% for i in range(1, loop.depth) %}
 
{% endfor %}
@{ key }@ {% if value.get('required', False) %}
required
{% endif %} {% if value.version_added %}
(added in @{value.version_added}@)
{% endif %}
{# Turn boolean values in 'yes' and 'no' values #} {% if value.default is defined %} {% if value.default == true %} {% set _x = value.update({'default': 'yes'}) %} {% elif value.default == false %} {% set _x = value.update({'default': 'no'}) %} {% endif %} {% endif %} {% if value.type == 'bool' %} {% set _x = value.update({'choices': ['no', 'yes']}) %} {% endif %} {# Show possible choices and highlight details #} {% if value.choices %}
    Choices: {% for choice in value.choices %} {# Turn boolean values in 'yes' and 'no' values #} {% if choice == true %} {% set choice = 'yes' %} {% elif choice == false %} {% set choice = 'no' %} {% endif %} {% if (value.default is string and value.default == choice) or (value.default is iterable and value.default is not string and choice in value.default) %}
  • @{ choice | escape }@ ←
  • {% else %}
  • @{ choice | escape }@
  • {% endif %} {% endfor %}
{% endif %} {# Show default value, when multiple choice or no choices #} {% if value.default is defined and value.default not in value.choices %} Default:
@{ value.default | escape }@
{% endif %}
{% if 'ini' in value %}
ini entries: {% for ini in value.ini %}

[@{ ini.section }@ ]
@{ ini.key }@ = @{ value.default | default('VALUE') }@

{% endfor %}
{% endif %} {% if 'env' in value %} {% for env in value.env %}
env:@{ env.name }@
{% endfor %} {% endif %} {% if 'vars' in value %} {% for myvar in value.vars %}
var: @{ myvar.name }@
{% endfor %} {% endif %}
{% if value.description is string %}
@{ value.description | replace('\n', '\n ') | html_ify }@
{% else %} {% for desc in value.description %}
@{ desc | replace('\n', '\n ') | html_ify }@
{% endfor %} {% endif %} {% if 'aliases' in value and value.aliases %}

aliases: @{ value.aliases|join(', ') }@
{% endif %}

{% endif %} {% if notes -%} Notes ----- .. note:: {% for note in notes %} - @{ note | convert_symbols_to_format }@ {% endfor %} {% endif %} {% if examples or plainexamples -%} Examples -------- .. code-block:: yaml {% for example in examples %} {% if example['description'] %}@{ example['description'] | indent(4, True) }@{% endif %} @{ example['code'] | escape | indent(4, True) }@ {% endfor %} {% if plainexamples %}@{ plainexamples | indent(4, True) }@{% endif %} {% endif %} {% if not returnfacts and returndocs and returndocs.ansible_facts is defined %} {% set returnfacts = returndocs.ansible_facts.contains %} {% set _x = returndocs.pop('ansible_facts', None) %} {% endif %} {% if returnfacts -%} Returned Facts -------------- Facts returned by this module are added/updated in the ``hostvars`` host facts and can be referenced by name just like any other host fact. They do not need to be registered in order to use them. .. raw:: html {% for key, value in returnfacts|dictsort recursive %} {# --------------------------------------------------------- # sadly we cannot blindly iterate through the child dicts, # since in some documentations, # lists are used instead of dicts. This handles both types # ---------------------------------------------------------#} {% if value.contains %} {% if value.contains.items %} @{ loop(value.contains.items()) }@ {% elif value.contains[0].items %} @{ loop(value.contains[0].items()) }@ {% endif %} {% endif %} {% endfor %}
Fact
Returned
Description
{% for i in range(1, loop.depth) %}
 
{% endfor %}
@{ key }@
@{ value.type }@
@{ value.returned | html_ify }@
{% if value.description is string %}
@{ value.description | html_ify }@
{% else %} {% for desc in value.description %}
@{ desc | html_ify }@
{% endfor %} {% endif %}
{% if value.sample is defined and value.sample %}
Sample:
{# TODO: The sample should be escaped, using | escape or | htmlify, but both mess things up beyond repair with dicts #}
@{ value.sample | replace('\n', '\n ') | html_ify }@
{% endif %}


{% endif %} {% if returndocs -%} Return Values ------------- Common return values are documented :ref:`here `, the following are the fields unique to this @{ plugin_type }@: .. raw:: html {% for key, value in returndocs|dictsort recursive %} {# --------------------------------------------------------- # sadly we cannot blindly iterate through the child dicts, # since in some documentations, # lists are used instead of dicts. This handles both types # ---------------------------------------------------------#} {% if value.contains %} {% if value.contains.items %} @{ loop(value.contains.items()) }@ {% elif value.contains[0].items %} @{ loop(value.contains[0].items()) }@ {% endif %} {% endif %} {% endfor %}
Key
Returned
Description
{% for i in range(1, loop.depth) %}
 
{% endfor %}
@{ key }@
@{ value.type }@
@{ value.returned | html_ify }@
{% if value.description is string %}
@{ value.description | html_ify }@
{% else %} {% for desc in value.description %}
@{ desc | html_ify }@
{% endfor %} {% endif %}
{% if value.sample is defined and value.sample %}
Sample:
{# TODO: The sample should be escaped, using |escape or |htmlify, but both mess things up beyond repair with dicts #}
@{ value.sample | replace('\n', '\n ') | html_ify }@
{% endif %}


{% endif %} Status ------ {% if not deprecated %} {% set support = { 'core': 'The Ansible Core Team', 'network': 'The Ansible Network Team', 'certified': 'an Ansible Partner', 'community': 'The Ansible Community', 'curated': 'A Third Party'} %} {% set module_states = { 'preview': 'it is not guaranteed to have a backwards compatible interface', 'stableinterface': 'the maintainers for this module guarantee that no backward incompatible interface changes will be made'} %} {% if metadata %} {% if metadata.status %} {% for cur_state in metadata.status %} This module is flagged as **@{cur_state}@** which means that @{module_states[cur_state]}@. {% endfor %} {% endif %} {% if metadata.supported_by in ('core', 'network') %} Support ~~~~~~~ For more information about Red Hat's support of this @{ plugin_type }@, please refer to this `Knowledge Base article `_ {% endif %} {% endif %} {% else %} This module is flagged as **deprecated** and will be removed in version @{ deprecated['removed_in'] | default('') | string | convert_symbols_to_format }@. For more information see `DEPRECATED`_. {% endif %} {% if author is defined -%} Author ~~~~~~ {% for author_name in author %} - @{ author_name }@ {% endfor %} {% endif %} .. hint:: {% if plugin_type == 'module' %} If you notice any issues in this documentation you can `edit this document `_ to improve it. {% else %} If you notice any issues in this documentation you can `edit this document {% endfor %} .. note:: - **(D)**: This marks a plugin as deprecated, which means a plugin is kept for backwards compatibility but usage is discouraged. The plugin documentation details page may explain more about this rationale. ansible-2.5.1/examples/0000755000000000000000000000000013265756221014721 5ustar rootroot00000000000000ansible-2.5.1/examples/ansible.cfg0000644000000000000000000004556313265756155017042 0ustar rootroot00000000000000# config file for ansible -- https://ansible.com/ # =============================================== # nearly all parameters can be overridden in ansible-playbook # or with command line flags. ansible will read ANSIBLE_CONFIG, # ansible.cfg in the current working directory, .ansible.cfg in # the home directory or /etc/ansible/ansible.cfg, whichever it # finds first [defaults] # some basic default values... #inventory = /etc/ansible/hosts #library = /usr/share/my_modules/ #module_utils = /usr/share/my_module_utils/ #remote_tmp = ~/.ansible/tmp #local_tmp = ~/.ansible/tmp #plugin_filters_cfg = /etc/ansible/plugin_filters.yml #forks = 5 #poll_interval = 15 #sudo_user = root #ask_sudo_pass = True #ask_pass = True #transport = smart #remote_port = 22 #module_lang = C #module_set_locale = False # plays will gather facts by default, which contain information about # the remote system. # # smart - gather by default, but don't regather if already gathered # implicit - gather by default, turn off with gather_facts: False # explicit - do not gather by default, must say gather_facts: True #gathering = implicit # This only affects the gathering done by a play's gather_facts directive, # by default gathering retrieves all facts subsets # all - gather all subsets # network - gather min and network facts # hardware - gather hardware facts (longest facts to retrieve) # virtual - gather min and virtual facts # facter - import facts from facter # ohai - import facts from ohai # You can combine them using comma (ex: network,virtual) # You can negate them using ! (ex: !hardware,!facter,!ohai) # A minimal set of facts is always gathered. 
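# Example (illustrative, not part of the stock config): subsets combine with
# commas and negate with '!', so either of the lines below would keep fact
# gathering fast by skipping the expensive hardware probe, or gather only
# network and virtual facts on top of the always-collected minimum:
#gather_subset = !hardware
#gather_subset = network,virtual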
#gather_subset = all # some hardware related facts are collected # with a maximum timeout of 10 seconds. This # option lets you increase or decrease that # timeout to something more suitable for the # environment. # gather_timeout = 10 # additional paths to search for roles in, colon separated #roles_path = /etc/ansible/roles # uncomment this to disable SSH key host checking #host_key_checking = False # change the default callback; you can only have one 'stdout' type enabled at a time. #stdout_callback = skippy ## Ansible ships with some plugins that require whitelisting, ## this is done to avoid running all of a type by default. ## These settings list those that you want enabled for your system. ## Custom plugins should not need this unless plugin author specifies it. # enable callback plugins; they can output to stdout but cannot be 'stdout' type. #callback_whitelist = timer, mail # Determine whether includes in tasks and handlers are "static" by # default. As of 2.0, includes are dynamic by default. Setting these # values to True will make includes behave more like they did in the # 1.x versions. #task_includes_static = False #handler_includes_static = False # Controls if a missing handler for a notification event is an error or a warning #error_on_missing_handler = True # change this for alternative sudo implementations #sudo_exe = sudo # What flags to pass to sudo # WARNING: leaving out the defaults might create unexpected behaviours #sudo_flags = -H -S -n # SSH timeout #timeout = 10 # default user to use for playbooks if user is not specified # (/usr/bin/ansible will use current user as default) #remote_user = root # logging is off by default unless this path is defined # if so defined, consider logrotate #log_path = /var/log/ansible.log # default module name for /usr/bin/ansible #module_name = command # use this shell for commands executed under sudo # you may need to change this to /bin/bash in rare instances # if sudo is constrained #executable = /bin/sh # if inventory variables overlap, does the higher precedence one win # or are hash values merged together? The default is 'replace' but # this can also be set to 'merge'. #hash_behaviour = replace # by default, variables from roles will be visible in the global variable # scope. To prevent this, the following option can be enabled, and only # tasks and handlers within the role will see the variables there #private_role_vars = yes # list any Jinja2 extensions to enable here: #jinja2_extensions = jinja2.ext.do,jinja2.ext.i18n # if set, always use this private key file for authentication, same as # if passing --private-key to ansible or ansible-playbook #private_key_file = /path/to/file # If set, configures the path to the Vault password file as an alternative to # specifying --vault-password-file on the command line. #vault_password_file = /path/to/vault_password_file # format of the string {{ ansible_managed }} available within Jinja2 # templates; it indicates to users editing template files that content will be replaced, # substituting {file}, {host} and {uid} and strftime codes with proper values. #ansible_managed = Ansible managed: {file} modified on %Y-%m-%d %H:%M:%S by {uid} on {host} # {file}, {host}, {uid}, and the timestamp can all interfere with idempotence # in some situations so the default is a static string: #ansible_managed = Ansible managed # by default, ansible-playbook will display "Skipping [host]" if it determines a task # should not be run on a host. Set this to "False" if you don't want to see these "Skipping" # messages.
NOTE: the task header will still be shown regardless of whether or not the # task is skipped. #display_skipped_hosts = True # by default, if a task in a playbook does not include a name: field then # ansible-playbook will construct a header that includes the task's action but # not the task's args. This is a security feature because ansible cannot know # if the *module* considers an argument to be no_log at the time that the # header is printed. If your environment doesn't have a problem securing # stdout from ansible-playbook (or you have manually specified no_log in your # playbook on all of the tasks where you have secret information) then you can # safely set this to True to get more informative messages. #display_args_to_stdout = False # by default (as of 1.3), Ansible will raise errors when attempting to dereference # Jinja2 variables that are not set in templates or action lines. Uncomment this line # to revert the behavior to pre-1.3. #error_on_undefined_vars = False # by default (as of 1.6), Ansible may display warnings based on the configuration of the # system running ansible itself. This may include warnings about 3rd party packages or # other conditions that should be resolved if possible. # to disable these warnings, set the following value to False: #system_warnings = True # by default (as of 1.4), Ansible may display deprecation warnings for language # features that should no longer be used and will be removed in future versions. # to disable these warnings, set the following value to False: #deprecation_warnings = True # (as of 1.8), Ansible can optionally warn when usage of the shell and # command module appear to be simplified by using a default Ansible module # instead. These warnings can be silenced by adjusting the following # setting or adding warn=yes or warn=no to the end of the command line # parameter string. This will for example suggest using the git module # instead of shelling out to the git command. # command_warnings = False # set plugin path directories here, separate with colons #action_plugins = /usr/share/ansible/plugins/action #cache_plugins = /usr/share/ansible/plugins/cache #callback_plugins = /usr/share/ansible/plugins/callback #connection_plugins = /usr/share/ansible/plugins/connection #lookup_plugins = /usr/share/ansible/plugins/lookup #inventory_plugins = /usr/share/ansible/plugins/inventory #vars_plugins = /usr/share/ansible/plugins/vars #filter_plugins = /usr/share/ansible/plugins/filter #test_plugins = /usr/share/ansible/plugins/test #terminal_plugins = /usr/share/ansible/plugins/terminal #strategy_plugins = /usr/share/ansible/plugins/strategy # by default, ansible will use the 'linear' strategy but you may want to try # another one #strategy = free # by default callbacks are not loaded for /bin/ansible, enable this if you # want, for example, a notification or logging callback to also apply to # /bin/ansible runs #bin_ansible_callbacks = False # don't like cows? that's unfortunate. # set to 1 if you don't want cowsay support or export ANSIBLE_NOCOWS=1 #nocows = 1 # set which cowsay stencil you'd like to use by default. When set to 'random', # a random stencil will be selected for each task. The selection will be filtered # against the `cow_whitelist` option below. #cow_selection = default #cow_selection = random # when using the 'random' option for cowsay, stencils will be restricted to this list. # it should be formatted as a comma-separated list with no spaces between names. 
# NOTE: line continuations here are for formatting purposes only, as the INI parser # in python does not support them. #cow_whitelist=bud-frogs,bunny,cheese,daemon,default,dragon,elephant-in-snake,elephant,eyes,\ # hellokitty,kitty,luke-koala,meow,milk,moofasa,moose,ren,sheep,small,stegosaurus,\ # stimpy,supermilker,three-eyes,turkey,turtle,tux,udder,vader-koala,vader,www # don't like colors either? # set to 1 if you don't want colors, or export ANSIBLE_NOCOLOR=1 #nocolor = 1 # if set to a persistent type (not 'memory', for example 'redis') fact values # from previous runs in Ansible will be stored. This may be useful when # wanting to use, for example, IP information from one group of servers # without having to talk to them in the same playbook run to get their # current IP information. #fact_caching = memory # retry files # When a playbook fails by default a .retry file will be created in ~/ # You can disable this feature by setting retry_files_enabled to False # and you can change the location of the files by setting retry_files_save_path #retry_files_enabled = False #retry_files_save_path = ~/.ansible-retry # squash actions # Ansible can optimise actions that call modules with list parameters # when looping. Instead of calling the module once per with_ item, the # module is called once with all items at once. Currently this only works # under limited circumstances, and only with parameters named 'name'. #squash_actions = apk,apt,dnf,homebrew,pacman,pkgng,yum,zypper # prevents logging of task data, off by default #no_log = False # prevents logging of tasks, but only on the targets, data is still logged on the master/controller #no_target_syslog = False # controls whether Ansible will raise an error or warning if a task has no # choice but to create world readable temporary files to execute a module on # the remote machine. This option is False by default for security. Users may # turn this on to have behaviour more like Ansible prior to 2.1.x. See # https://docs.ansible.com/ansible/become.html#becoming-an-unprivileged-user # for more secure ways to fix this than enabling this option. #allow_world_readable_tmpfiles = False # controls the compression level of variables sent to # worker processes. At the default of 0, no compression # is used. This value must be an integer from 0 to 9. #var_compression_level = 9 # controls what compression method is used for new-style ansible modules when # they are sent to the remote system. The compression types depend on having # support compiled into both the controller's python and the client's python. # The names should match with the python Zipfile compression types: # * ZIP_STORED (no compression. available everywhere) # * ZIP_DEFLATED (uses zlib, the default) # These values may be set per host via the ansible_module_compression inventory # variable #module_compression = 'ZIP_DEFLATED' # This controls the cutoff point (in bytes) on --diff for files # set to 0 for unlimited (RAM may suffer!). #max_diff_size = 1048576 # This controls how ansible handles multiple --tags and --skip-tags arguments # on the CLI. If this is True then multiple arguments are merged together. If # it is False, then the last specified argument is used and the others are ignored. # This option will be removed in 2.8. 
#merge_multiple_cli_flags = True # Controls showing custom stats at the end, off by default #show_custom_stats = True # Controls which files to ignore when using a directory as inventory with # possibly multiple sources (both static and dynamic) #inventory_ignore_extensions = ~, .orig, .bak, .ini, .cfg, .retry, .pyc, .pyo # This family of modules uses an alternative execution path optimized for network appliances # only update this setting if you know how this works; otherwise it can break module execution #network_group_modules=eos, nxos, ios, iosxr, junos, vyos # When enabled, this option allows lookups (via variables like {{lookup('foo')}} or when used as # a loop with `with_foo`) to return data that is not marked "unsafe". This means the data may contain # jinja2 templating language which will be run through the templating engine. # ENABLING THIS COULD BE A SECURITY RISK #allow_unsafe_lookups = False # set default errors for all plays #any_errors_fatal = False [inventory] # enable inventory plugins, default: 'host_list', 'script', 'yaml', 'ini' #enable_plugins = host_list, virtualbox, yaml, constructed # ignore these extensions when parsing a directory as inventory source #ignore_extensions = .pyc, .pyo, .swp, .bak, ~, .rpm, .md, .txt, ~, .orig, .ini, .cfg, .retry # ignore files matching these patterns when parsing a directory as inventory source #ignore_patterns= # If 'true', unparsed inventory sources become fatal errors; otherwise they are warnings. #unparsed_is_failed=False [privilege_escalation] #become=True #become_method=sudo #become_user=root #become_ask_pass=False [paramiko_connection] # uncomment this line to cause the paramiko connection plugin to not record new host # keys encountered. Increases performance on new host additions. This setting works independently of the # host key checking setting above. #record_host_keys=False # by default, Ansible requests a pseudo-terminal for commands executed under sudo. Uncomment this # line to disable this behaviour. #pty=False # paramiko will default to looking for SSH keys initially when trying to # authenticate to remote devices. This is a problem for some network devices # that close the connection after a key failure. Uncomment this line to # disable the Paramiko look-for-keys function #look_for_keys = False # When using persistent connections with Paramiko, the connection runs in a # background process. If the host doesn't already have a valid SSH key, by # default Ansible will prompt to add the host key. This will cause connections # running in background processes to fail. Uncomment this line to have # Paramiko automatically add host keys. #host_key_auto_add = True [ssh_connection] # ssh arguments to use # Leaving off ControlPersist will result in poor performance, so use # paramiko on older platforms rather than removing it, -C controls compression use #ssh_args = -C -o ControlMaster=auto -o ControlPersist=60s # The base directory for the ControlPath sockets. # This is the "%(directory)s" in the control_path option # # Example: # control_path_dir = /tmp/.ansible/cp #control_path_dir = ~/.ansible/cp # The path to use for the ControlPath sockets. This defaults to a hashed string of the hostname, # port and username (empty string in the config). The hash mitigates a common problem users # found with long hostnames and the conventional %(directory)s/ansible-ssh-%%h-%%p-%%r format. # In those cases, a "too long for Unix domain socket" ssh error would occur.
# # Example: # control_path = %(directory)s/%%h-%%r #control_path = # Enabling pipelining reduces the number of SSH operations required to # execute a module on the remote server. This can result in a significant # performance improvement when enabled; however, when using "sudo:" you must # first disable 'requiretty' in /etc/sudoers # # By default, this option is disabled to preserve compatibility with # sudoers configurations that have requiretty (the default on many distros). # #pipelining = False # Control the mechanism for transferring files (old) # * smart = try sftp and then try scp [default] # * True = use scp only # * False = use sftp only #scp_if_ssh = smart # Control the mechanism for transferring files (new) # If set, this will override the scp_if_ssh option # * sftp = use sftp to transfer files # * scp = use scp to transfer files # * piped = use 'dd' over SSH to transfer files # * smart = try sftp, scp, and piped, in that order [default] #transfer_method = smart # if False, sftp will not use batch mode to transfer files. This may make some # types of file transfer failures impossible to catch, however, and should # only be disabled if your sftp version has problems with batch mode #sftp_batch_mode = False # The -tt argument is passed to ssh when pipelining is not enabled because sudo # requires a tty by default. #use_tty = True [persistent_connection] # Configures the persistent connection timeout value in seconds. This value is # how long the persistent connection will remain idle before it is destroyed. # If the connection doesn't receive a request before the timeout value # expires, the connection is shut down. The default value is 30 seconds. #connect_timeout = 30 # Configures the persistent connection retry timeout. This value configures the # retry timeout that ansible-connection will wait to connect # to the local domain socket. This value must be larger than the # ssh timeout (timeout) and less than the persistent connection idle timeout (connect_timeout). # The default value is 15 seconds. #connect_retry_timeout = 15 # The command timeout value defines the amount of time to wait for a command # or RPC call before timing out. The value for the command timeout must # be less than the value of the persistent connection idle timeout (connect_timeout) # The default value is 10 seconds. #command_timeout = 10 [accelerate] #accelerate_port = 5099 #accelerate_timeout = 30 #accelerate_connect_timeout = 5.0 # The daemon timeout is measured in minutes. This time is measured # from the last activity to the accelerate daemon. #accelerate_daemon_timeout = 30 # If set to yes, accelerate_multi_key will allow multiple # private keys to be uploaded to it, though each user must # have access to the system via SSH to add a new key. The default # is "no". #accelerate_multi_key = yes [selinux] # file systems that require special treatment when dealing with security context # the default behaviour that copies the existing context or uses the user default # needs to be changed to use the file system dependent context. #special_context_filesystems=nfs,vboxsf,fuse,ramfs,9p # Set this to yes to allow libvirt_lxc connections to work without SELinux.
[accelerate]
#accelerate_port = 5099
#accelerate_timeout = 30
#accelerate_connect_timeout = 5.0

# The daemon timeout is measured in minutes. This time is measured
# from the last activity to the accelerate daemon.
#accelerate_daemon_timeout = 30

# If set to yes, accelerate_multi_key will allow multiple
# private keys to be uploaded to it, though each user must
# have access to the system via SSH to add a new key. The default
# is "no".
#accelerate_multi_key = yes

[selinux]
# file systems that require special treatment when dealing with security context
# the default behaviour that copies the existing context or uses the user default
# needs to be changed to use the file system dependent context.
#special_context_filesystems=nfs,vboxsf,fuse,ramfs,9p

# Set this to yes to allow libvirt_lxc connections to work without SELinux.
#libvirt_lxc_noseclabel = yes

[colors]
#highlight = white
#verbose = blue
#warn = bright purple
#error = red
#debug = dark gray
#deprecate = purple
#skip = cyan
#unreachable = red
#ok = green
#changed = yellow
#diff_add = green
#diff_remove = red
#diff_lines = cyan

[diff]
# Always print diff when running ( same as always running with -D/--diff )
# always = no

# Set how many context lines to show in diff
# context = 3
ansible-2.5.1/examples/hosts0000644000000000000000000000177013265756155016017 0ustar rootroot00000000000000# This is the default ansible 'hosts' file.
#
# It should live in /etc/ansible/hosts
#
#   - Comments begin with the '#' character
#   - Blank lines are ignored
#   - Groups of hosts are delimited by [header] elements
#   - You can enter hostnames or ip addresses
#   - A hostname/ip can be a member of multiple groups

# Ex 1: Ungrouped hosts, specify before any group headers.

## green.example.com
## blue.example.com
## 192.168.100.1
## 192.168.100.10

# Ex 2: A collection of hosts belonging to the 'webservers' group

## [webservers]
## alpha.example.org
## beta.example.org
## 192.168.1.100
## 192.168.1.110

# If you have multiple hosts following a pattern you can specify
# them like this:

## www[001:006].example.com

# Ex 3: A collection of database servers in the 'dbservers' group

## [dbservers]
##
## db01.intranet.mydomain.net
## db02.intranet.mydomain.net
## 10.25.1.56
## 10.25.1.57

# Here's another example of host ranges, this time there are no
# leading 0s:

## db-[99:101]-node.example.com
ansible-2.5.1/lib/0000755000000000000000000000000013265756221013651 5ustar rootroot00000000000000ansible-2.5.1/lib/ansible/0000755000000000000000000000000013265756221015266 5ustar rootroot00000000000000ansible-2.5.1/lib/ansible/cli/0000755000000000000000000000000013265756221016035 5ustar rootroot00000000000000ansible-2.5.1/lib/ansible/cli/__init__.py0000644000000000000000000011603713265756155020164 0ustar rootroot00000000000000# (c) 2012-2014, Michael DeHaan
# (c) 2016, Toshio Kuratomi
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
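# A rough sketch (illustrative only) of how the bin/ansible-* front ends drive
# the CLI subclasses built on this module, e.g. AdHocCLI from ansible.cli.adhoc:
#
#   from ansible.cli.adhoc import AdHocCLI
#
#   cli = AdHocCLI(['ansible', 'all', '-m', 'ping'])
#   cli.parse()             # builds self.options/self.args via base_parser()
#   exit_code = cli.run()   # returns a shell-style exit code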
# Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type import getpass import operator import optparse import os import subprocess import re import sys import time import yaml from abc import ABCMeta, abstractmethod import ansible from ansible import constants as C from ansible.errors import AnsibleOptionsError, AnsibleError from ansible.inventory.manager import InventoryManager from ansible.module_utils.six import with_metaclass, string_types from ansible.module_utils._text import to_bytes, to_text from ansible.parsing.dataloader import DataLoader from ansible.release import __version__ from ansible.utils.path import unfrackpath from ansible.utils.vars import load_extra_vars, load_options_vars from ansible.vars.manager import VariableManager from ansible.parsing.vault import PromptVaultSecret, get_file_vault_secret try: from __main__ import display except ImportError: from ansible.utils.display import Display display = Display() class SortedOptParser(optparse.OptionParser): '''Optparser which sorts the options by opt before outputting --help''' def format_help(self, formatter=None, epilog=None): self.option_list.sort(key=operator.methodcaller('get_opt_string')) return optparse.OptionParser.format_help(self, formatter=None) # Note: Inherit from SortedOptParser so that we get our format_help method class InvalidOptsParser(SortedOptParser): '''Ignore invalid options. Meant for the special case where we need to take care of help and version but may not know the full range of options yet. (See it in use in set_action) ''' def __init__(self, parser): # Since this is special purposed to just handle help and version, we # take a pre-existing option parser here and set our options from # that. This allows us to give accurate help based on the given # option parser. SortedOptParser.__init__(self, usage=parser.usage, option_list=parser.option_list, option_class=parser.option_class, conflict_handler=parser.conflict_handler, description=parser.description, formatter=parser.formatter, add_help_option=False, prog=parser.prog, epilog=parser.epilog) self.version = parser.version def _process_long_opt(self, rargs, values): try: optparse.OptionParser._process_long_opt(self, rargs, values) except optparse.BadOptionError: pass def _process_short_opts(self, rargs, values): try: optparse.OptionParser._process_short_opts(self, rargs, values) except optparse.BadOptionError: pass class CLI(with_metaclass(ABCMeta, object)): ''' code behind bin/ansible* programs ''' VALID_ACTIONS = [] _ITALIC = re.compile(r"I\(([^)]+)\)") _BOLD = re.compile(r"B\(([^)]+)\)") _MODULE = re.compile(r"M\(([^)]+)\)") _URL = re.compile(r"U\(([^)]+)\)") _CONST = re.compile(r"C\(([^)]+)\)") PAGER = 'less' # -F (quit-if-one-screen) -R (allow raw ansi control chars) # -S (chop long lines) -X (disable termcap init and de-init) LESS_OPTS = 'FRSX' SKIP_INVENTORY_DEFAULTS = False def __init__(self, args, callback=None): """ Base init method for all command line programs """ self.args = args self.options = None self.parser = None self.action = None self.callback = callback def set_action(self): """ Get the action the user wants to execute from the sys argv list. """ for i in range(0, len(self.args)): arg = self.args[i] if arg in self.VALID_ACTIONS: self.action = arg del self.args[i] break if not self.action: # if we're asked for help or version, we don't need an action. 
            # have to use a special purpose Option Parser to figure that out as
            # the standard OptionParser throws an error for unknown options and
            # without knowing action, we only know of a subset of the options
            # that could be legal for this command
            tmp_parser = InvalidOptsParser(self.parser)
            tmp_options, tmp_args = tmp_parser.parse_args(self.args)
            # a missing action is only an error when neither help nor version was requested
            if not ((hasattr(tmp_options, 'help') and tmp_options.help) or (hasattr(tmp_options, 'version') and tmp_options.version)):
                raise AnsibleOptionsError("Missing required action")

    def execute(self):
        """ Actually runs a child defined method using the execute_<action> pattern """
        fn = getattr(self, "execute_%s" % self.action)
        fn()

    @abstractmethod
    def run(self):
        """Run the ansible command

        Subclasses must implement this method.  It does the actual work of
        running an Ansible command.
        """

        display.vv(to_text(self.parser.get_version()))

        if C.CONFIG_FILE:
            display.v(u"Using %s as config file" % to_text(C.CONFIG_FILE))
        else:
            display.v(u"No config file found; using defaults")

        # warn about deprecated config options
        for deprecated in C.config.DEPRECATED:
            name = deprecated[0]
            why = deprecated[1]['why']
            if 'alternatives' in deprecated[1]:
                alt = ', use %s instead' % deprecated[1]['alternatives']
            else:
                alt = ''
            ver = deprecated[1]['version']
            display.deprecated("%s option, %s %s" % (name, why, alt), version=ver)

        # warn about typing issues with configuration entries
        for unable in C.config.UNABLE:
            display.warning("Unable to set correct type for configuration entry: %s" % unable)

    @staticmethod
    def split_vault_id(vault_id):
        # return (before_@, after_@)
        # if no @, return whole string as after_
        if '@' not in vault_id:
            return (None, vault_id)

        parts = vault_id.split('@', 1)
        ret = tuple(parts)
        return ret

    @staticmethod
    def build_vault_ids(vault_ids, vault_password_files=None,
                        ask_vault_pass=None, create_new_password=None,
                        auto_prompt=True):

        vault_password_files = vault_password_files or []
        vault_ids = vault_ids or []

        # convert vault_password_files into vault_ids slugs
        for password_file in vault_password_files:
            id_slug = u'%s@%s' % (C.DEFAULT_VAULT_IDENTITY, password_file)

            # note this makes --vault-id higher precedence than --vault-password-file
            # if we want to intertwingle them in order probably need a cli callback to populate vault_ids
            # used by --vault-id and --vault-password-file
            vault_ids.append(id_slug)

        # if an action needs an encrypt password (create_new_password=True) and we don't
        # have other secrets setup, then automatically add a password prompt as well.
        # prompts can't/shouldn't work without a tty, so don't add prompt secrets
        if ask_vault_pass or (not vault_ids and auto_prompt):
            id_slug = u'%s@%s' % (C.DEFAULT_VAULT_IDENTITY, u'prompt_ask_vault_pass')
            vault_ids.append(id_slug)

        return vault_ids
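    # A minimal sketch of how the two helpers above combine (illustrative,
    # not executed here). Note that --vault-id slugs keep their CLI position
    # and --vault-password-file entries are appended after them under the
    # default identity (normally 'default'):
    #
    #   CLI.split_vault_id('dev@~/.vault_pass.txt')   # ('dev', '~/.vault_pass.txt')
    #   CLI.split_vault_id('prompt')                  # (None, 'prompt')
    #
    #   CLI.build_vault_ids(['dev@prompt'],
    #                       vault_password_files=['~/.vault_pass.txt'],
    #                       auto_prompt=False)
    #   # -> ['dev@prompt', 'default@~/.vault_pass.txt']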
    # TODO: remove the now unused args
    @staticmethod
    def setup_vault_secrets(loader, vault_ids, vault_password_files=None,
                            ask_vault_pass=None, create_new_password=False,
                            auto_prompt=True):
        # list of tuples
        vault_secrets = []

        # Depending on the vault_id value (including how --ask-vault-pass / --vault-password-file create a vault_id)
        # we need to show different prompts. This is for compat with older Towers that expect a
        # certain vault password prompt format, so 'prompt_ask_vault_pass' vault_id gets the old format.
        prompt_formats = {}

        # If there are configured default vault identities, they are considered 'first'
        # so we prepend them to vault_ids (from cli) here
        vault_password_files = vault_password_files or []
        if C.DEFAULT_VAULT_PASSWORD_FILE:
            vault_password_files.append(C.DEFAULT_VAULT_PASSWORD_FILE)

        if create_new_password:
            prompt_formats['prompt'] = ['New vault password (%(vault_id)s): ',
                                        'Confirm new vault password (%(vault_id)s): ']
            # 2.3 format prompts for --ask-vault-pass
            prompt_formats['prompt_ask_vault_pass'] = ['New Vault password: ',
                                                       'Confirm New Vault password: ']
        else:
            prompt_formats['prompt'] = ['Vault password (%(vault_id)s): ']
            # The format when we use just --ask-vault-pass needs to match 'Vault password:\s*?$'
            prompt_formats['prompt_ask_vault_pass'] = ['Vault password: ']

        vault_ids = CLI.build_vault_ids(vault_ids,
                                        vault_password_files,
                                        ask_vault_pass,
                                        create_new_password,
                                        auto_prompt=auto_prompt)

        for vault_id_slug in vault_ids:
            vault_id_name, vault_id_value = CLI.split_vault_id(vault_id_slug)
            if vault_id_value in ['prompt', 'prompt_ask_vault_pass']:

                # --vault-id some_name@prompt_ask_vault_pass --vault-id other_name@prompt_ask_vault_pass will be a little
                # confusing since it will use the old format without the vault id in the prompt
                built_vault_id = vault_id_name or C.DEFAULT_VAULT_IDENTITY

                # choose the prompt based on --vault-id=prompt or --ask-vault-pass. --ask-vault-pass
                # always gets the old format for Tower compatibility.
                # ie, we used --ask-vault-pass, so we need to use the old vault password prompt
                # format since Tower needs to match on that format.
                prompted_vault_secret = PromptVaultSecret(prompt_formats=prompt_formats[vault_id_value],
                                                          vault_id=built_vault_id)

                # an empty or invalid password from the prompt will warn and continue to the next
                # without erroring globally
                try:
                    prompted_vault_secret.load()
                except AnsibleError as exc:
                    display.warning('Error in vault password prompt (%s): %s' % (vault_id_name, exc))
                    raise

                vault_secrets.append((built_vault_id, prompted_vault_secret))

                # update loader with new secrets incrementally, so we can load a vault password
                # that is encrypted with a vault secret provided earlier
                loader.set_vault_secrets(vault_secrets)
                continue

            # assuming anything else is a password file
            display.vvvvv('Reading vault password file: %s' % vault_id_value)
            # read vault_pass from a file
            file_vault_secret = get_file_vault_secret(filename=vault_id_value,
                                                      vault_id=vault_id_name,
                                                      loader=loader)

            # an invalid password file will error globally
            try:
                file_vault_secret.load()
            except AnsibleError as exc:
                display.warning('Error in vault password file loading (%s): %s' % (vault_id_name, exc))
                raise

            if vault_id_name:
                vault_secrets.append((vault_id_name, file_vault_secret))
            else:
                vault_secrets.append((C.DEFAULT_VAULT_IDENTITY, file_vault_secret))

            # update loader with as-yet-known vault secrets
            loader.set_vault_secrets(vault_secrets)

        return vault_secrets

    def ask_passwords(self):
        ''' prompt for connection and become passwords if needed '''

        op = self.options
        sshpass = None
        becomepass = None
        become_prompt = ''

        become_prompt_method = "BECOME" if C.AGNOSTIC_BECOME_PROMPT else op.become_method.upper()

        try:
            if op.ask_pass:
                sshpass = getpass.getpass(prompt="SSH password: ")
                become_prompt = "%s password[defaults to SSH password]: " % become_prompt_method
                if sshpass:
                    sshpass = to_bytes(sshpass, errors='strict', nonstring='simplerepr')
            else:
                become_prompt = "%s password: " % become_prompt_method

            if op.become_ask_pass:
                becomepass = getpass.getpass(prompt=become_prompt)
                if op.ask_pass and
becomepass == '': becomepass = sshpass if becomepass: becomepass = to_bytes(becomepass) except EOFError: pass return (sshpass, becomepass) def normalize_become_options(self): ''' this keeps backwards compatibility with sudo/su self.options ''' self.options.become_ask_pass = self.options.become_ask_pass or self.options.ask_sudo_pass or self.options.ask_su_pass or C.DEFAULT_BECOME_ASK_PASS self.options.become_user = self.options.become_user or self.options.sudo_user or self.options.su_user or C.DEFAULT_BECOME_USER def _dep(which): display.deprecated('The %s command line option has been deprecated in favor of the "become" command line arguments' % which, '2.6') if self.options.become: pass elif self.options.sudo: self.options.become = True self.options.become_method = 'sudo' _dep('sudo') elif self.options.su: self.options.become = True self.options.become_method = 'su' _dep('su') # other deprecations: if self.options.ask_sudo_pass or self.options.sudo_user: _dep('sudo') if self.options.ask_su_pass or self.options.su_user: _dep('su') def validate_conflicts(self, vault_opts=False, runas_opts=False, fork_opts=False, vault_rekey_opts=False): ''' check for conflicting options ''' op = self.options if vault_opts: # Check for vault related conflicts if (op.ask_vault_pass and op.vault_password_files): self.parser.error("--ask-vault-pass and --vault-password-file are mutually exclusive") if vault_rekey_opts: if (op.new_vault_id and op.new_vault_password_file): self.parser.error("--new-vault-password-file and --new-vault-id are mutually exclusive") if runas_opts: # Check for privilege escalation conflicts if ((op.su or op.su_user) and (op.sudo or op.sudo_user) or (op.su or op.su_user) and (op.become or op.become_user) or (op.sudo or op.sudo_user) and (op.become or op.become_user)): self.parser.error("Sudo arguments ('--sudo', '--sudo-user', and '--ask-sudo-pass') and su arguments ('--su', '--su-user', and '--ask-su-pass') " "and become arguments ('--become', '--become-user', and '--ask-become-pass') are exclusive of each other") if fork_opts: if op.forks < 1: self.parser.error("The number of processes (--forks) must be >= 1") @staticmethod def unfrack_paths(option, opt, value, parser): paths = getattr(parser.values, option.dest) if paths is None: paths = [] if isinstance(value, string_types): paths[:0] = [unfrackpath(x) for x in value.split(os.pathsep) if x] elif isinstance(value, list): paths[:0] = [unfrackpath(x) for x in value if x] else: pass # FIXME: should we raise options error? 
setattr(parser.values, option.dest, paths) @staticmethod def unfrack_path(option, opt, value, parser): if value != '-': setattr(parser.values, option.dest, unfrackpath(value)) else: setattr(parser.values, option.dest, value) @staticmethod def base_parser(usage="", output_opts=False, runas_opts=False, meta_opts=False, runtask_opts=False, vault_opts=False, module_opts=False, async_opts=False, connect_opts=False, subset_opts=False, check_opts=False, inventory_opts=False, epilog=None, fork_opts=False, runas_prompt_opts=False, desc=None, basedir_opts=False, vault_rekey_opts=False): ''' create an options parser for most ansible scripts ''' # base opts parser = SortedOptParser(usage, version=CLI.version("%prog"), description=desc, epilog=epilog) parser.add_option('-v', '--verbose', dest='verbosity', default=C.DEFAULT_VERBOSITY, action="count", help="verbose mode (-vvv for more, -vvvv to enable connection debugging)") if inventory_opts: parser.add_option('-i', '--inventory', '--inventory-file', dest='inventory', action="append", help="specify inventory host path or comma separated host list. --inventory-file is deprecated") parser.add_option('--list-hosts', dest='listhosts', action='store_true', help='outputs a list of matching hosts; does not execute anything else') parser.add_option('-l', '--limit', default=C.DEFAULT_SUBSET, dest='subset', help='further limit selected hosts to an additional pattern') if module_opts: parser.add_option('-M', '--module-path', dest='module_path', default=None, help="prepend colon-separated path(s) to module library (default=%s)" % C.DEFAULT_MODULE_PATH, action="callback", callback=CLI.unfrack_paths, type='str') if runtask_opts: parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append", help="set additional variables as key=value or YAML/JSON, if filename prepend with @", default=[]) if fork_opts: parser.add_option('-f', '--forks', dest='forks', default=C.DEFAULT_FORKS, type='int', help="specify number of parallel processes to use (default=%s)" % C.DEFAULT_FORKS) if vault_opts: parser.add_option('--ask-vault-pass', default=C.DEFAULT_ASK_VAULT_PASS, dest='ask_vault_pass', action='store_true', help='ask for vault password') parser.add_option('--vault-password-file', default=[], dest='vault_password_files', help="vault password file", action="callback", callback=CLI.unfrack_paths, type='string') parser.add_option('--vault-id', default=[], dest='vault_ids', action='append', type='string', help='the vault identity to use') if vault_rekey_opts: parser.add_option('--new-vault-password-file', default=None, dest='new_vault_password_file', help="new vault password file for rekey", action="callback", callback=CLI.unfrack_path, type='string') parser.add_option('--new-vault-id', default=None, dest='new_vault_id', type='string', help='the new vault identity to use for rekey') if subset_opts: parser.add_option('-t', '--tags', dest='tags', default=C.TAGS_RUN, action='append', help="only run plays and tasks tagged with these values") parser.add_option('--skip-tags', dest='skip_tags', default=C.TAGS_SKIP, action='append', help="only run plays and tasks whose tags do not match these values") if output_opts: parser.add_option('-o', '--one-line', dest='one_line', action='store_true', help='condense output') parser.add_option('-t', '--tree', dest='tree', default=None, help='log output to this directory') if connect_opts: connect_group = optparse.OptionGroup(parser, "Connection Options", "control as whom and how to connect to hosts") connect_group.add_option('-k', 
'--ask-pass', default=C.DEFAULT_ASK_PASS, dest='ask_pass', action='store_true', help='ask for connection password') connect_group.add_option('--private-key', '--key-file', default=C.DEFAULT_PRIVATE_KEY_FILE, dest='private_key_file', help='use this file to authenticate the connection', action="callback", callback=CLI.unfrack_path, type='string') connect_group.add_option('-u', '--user', default=C.DEFAULT_REMOTE_USER, dest='remote_user', help='connect as this user (default=%s)' % C.DEFAULT_REMOTE_USER) connect_group.add_option('-c', '--connection', dest='connection', default=C.DEFAULT_TRANSPORT, help="connection type to use (default=%s)" % C.DEFAULT_TRANSPORT) connect_group.add_option('-T', '--timeout', default=C.DEFAULT_TIMEOUT, type='int', dest='timeout', help="override the connection timeout in seconds (default=%s)" % C.DEFAULT_TIMEOUT) connect_group.add_option('--ssh-common-args', default='', dest='ssh_common_args', help="specify common arguments to pass to sftp/scp/ssh (e.g. ProxyCommand)") connect_group.add_option('--sftp-extra-args', default='', dest='sftp_extra_args', help="specify extra arguments to pass to sftp only (e.g. -f, -l)") connect_group.add_option('--scp-extra-args', default='', dest='scp_extra_args', help="specify extra arguments to pass to scp only (e.g. -l)") connect_group.add_option('--ssh-extra-args', default='', dest='ssh_extra_args', help="specify extra arguments to pass to ssh only (e.g. -R)") parser.add_option_group(connect_group) runas_group = None rg = optparse.OptionGroup(parser, "Privilege Escalation Options", "control how and which user you become as on target hosts") if runas_opts: runas_group = rg # priv user defaults to root later on to enable detecting when this option was given here runas_group.add_option("-s", "--sudo", default=C.DEFAULT_SUDO, action="store_true", dest='sudo', help="run operations with sudo (nopasswd) (deprecated, use become)") runas_group.add_option('-U', '--sudo-user', dest='sudo_user', default=None, help='desired sudo user (default=root) (deprecated, use become)') runas_group.add_option('-S', '--su', default=C.DEFAULT_SU, action='store_true', help='run operations with su (deprecated, use become)') runas_group.add_option('-R', '--su-user', default=None, help='run operations with su as this user (default=%s) (deprecated, use become)' % C.DEFAULT_SU_USER) # consolidated privilege escalation (become) runas_group.add_option("-b", "--become", default=C.DEFAULT_BECOME, action="store_true", dest='become', help="run operations with become (does not imply password prompting)") runas_group.add_option('--become-method', dest='become_method', default=C.DEFAULT_BECOME_METHOD, type='choice', choices=C.BECOME_METHODS, help="privilege escalation method to use (default=%s), valid choices: [ %s ]" % (C.DEFAULT_BECOME_METHOD, ' | '.join(C.BECOME_METHODS))) runas_group.add_option('--become-user', default=None, dest='become_user', type='string', help='run operations as this user (default=%s)' % C.DEFAULT_BECOME_USER) if runas_opts or runas_prompt_opts: if not runas_group: runas_group = rg runas_group.add_option('--ask-sudo-pass', default=C.DEFAULT_ASK_SUDO_PASS, dest='ask_sudo_pass', action='store_true', help='ask for sudo password (deprecated, use become)') runas_group.add_option('--ask-su-pass', default=C.DEFAULT_ASK_SU_PASS, dest='ask_su_pass', action='store_true', help='ask for su password (deprecated, use become)') runas_group.add_option('-K', '--ask-become-pass', default=False, dest='become_ask_pass', action='store_true', help='ask for privilege 
escalation password') if runas_group: parser.add_option_group(runas_group) if async_opts: parser.add_option('-P', '--poll', default=C.DEFAULT_POLL_INTERVAL, type='int', dest='poll_interval', help="set the poll interval if using -B (default=%s)" % C.DEFAULT_POLL_INTERVAL) parser.add_option('-B', '--background', dest='seconds', type='int', default=0, help='run asynchronously, failing after X seconds (default=N/A)') if check_opts: parser.add_option("-C", "--check", default=False, dest='check', action='store_true', help="don't make any changes; instead, try to predict some of the changes that may occur") parser.add_option('--syntax-check', dest='syntax', action='store_true', help="perform a syntax check on the playbook, but do not execute it") parser.add_option("-D", "--diff", default=C.DIFF_ALWAYS, dest='diff', action='store_true', help="when changing (small) files and templates, show the differences in those files; works great with --check") if meta_opts: parser.add_option('--force-handlers', default=C.DEFAULT_FORCE_HANDLERS, dest='force_handlers', action='store_true', help="run handlers even if a task fails") parser.add_option('--flush-cache', dest='flush_cache', action='store_true', help="clear the fact cache for every host in inventory") if basedir_opts: parser.add_option('--playbook-dir', default=None, dest='basedir', action='store', help="Since this tool does not use playbooks, use this as a subsitute playbook directory." "This sets the relative path for many features including roles/ group_vars/ etc.") return parser @abstractmethod def parse(self): """Parse the command line args This method parses the command line arguments. It uses the parser stored in the self.parser attribute and saves the args and options in self.args and self.options respectively. Subclasses need to implement this method. They will usually create a base_parser, add their own options to the base_parser, and then call this method to do the actual parsing. An implementation will look something like this:: def parse(self): parser = super(MyCLI, self).base_parser(usage="My Ansible CLI", inventory_opts=True) parser.add_option('--my-option', dest='my_option', action='store') self.parser = parser super(MyCLI, self).parse() # If some additional transformations are needed for the # arguments and options, do it here. """ self.options, self.args = self.parser.parse_args(self.args[1:]) # process tags if hasattr(self.options, 'tags') and not self.options.tags: # optparse defaults does not do what's expected self.options.tags = ['all'] if hasattr(self.options, 'tags') and self.options.tags: if not C.MERGE_MULTIPLE_CLI_TAGS: if len(self.options.tags) > 1: display.deprecated('Specifying --tags multiple times on the command line currently uses the last specified value. ' 'In 2.4, values will be merged instead. Set merge_multiple_cli_tags=True in ansible.cfg to get this behavior now.', version=2.5, removed=False) self.options.tags = [self.options.tags[-1]] tags = set() for tag_set in self.options.tags: for tag in tag_set.split(u','): tags.add(tag.strip()) self.options.tags = list(tags) # process skip_tags if hasattr(self.options, 'skip_tags') and self.options.skip_tags: if not C.MERGE_MULTIPLE_CLI_TAGS: if len(self.options.skip_tags) > 1: display.deprecated('Specifying --skip-tags multiple times on the command line currently uses the last specified value. ' 'In 2.4, values will be merged instead. 
Set merge_multiple_cli_tags=True in ansible.cfg to get this behavior now.', version=2.5, removed=False) self.options.skip_tags = [self.options.skip_tags[-1]] skip_tags = set() for tag_set in self.options.skip_tags: for tag in tag_set.split(u','): skip_tags.add(tag.strip()) self.options.skip_tags = list(skip_tags) # process inventory options except for CLIs that require their own processing if hasattr(self.options, 'inventory') and not self.SKIP_INVENTORY_DEFAULTS: if self.options.inventory: # should always be list if isinstance(self.options.inventory, string_types): self.options.inventory = [self.options.inventory] # Ensure full paths when needed self.options.inventory = [unfrackpath(opt, follow=False) if ',' not in opt else opt for opt in self.options.inventory] else: self.options.inventory = C.DEFAULT_HOST_LIST @staticmethod def version(prog): ''' return ansible version ''' result = "{0} {1}".format(prog, __version__) gitinfo = CLI._gitinfo() if gitinfo: result = result + " {0}".format(gitinfo) result += "\n config file = %s" % C.CONFIG_FILE if C.DEFAULT_MODULE_PATH is None: cpath = "Default w/o overrides" else: cpath = C.DEFAULT_MODULE_PATH result = result + "\n configured module search path = %s" % cpath result = result + "\n ansible python module location = %s" % ':'.join(ansible.__path__) result = result + "\n executable location = %s" % sys.argv[0] result = result + "\n python version = %s" % ''.join(sys.version.splitlines()) return result @staticmethod def version_info(gitinfo=False): ''' return full ansible version info ''' if gitinfo: # expensive call, user with care ansible_version_string = CLI.version('') else: ansible_version_string = __version__ ansible_version = ansible_version_string.split()[0] ansible_versions = ansible_version.split('.') for counter in range(len(ansible_versions)): if ansible_versions[counter] == "": ansible_versions[counter] = 0 try: ansible_versions[counter] = int(ansible_versions[counter]) except: pass if len(ansible_versions) < 3: for counter in range(len(ansible_versions), 3): ansible_versions.append(0) return {'string': ansible_version_string.strip(), 'full': ansible_version, 'major': ansible_versions[0], 'minor': ansible_versions[1], 'revision': ansible_versions[2]} @staticmethod def _git_repo_info(repo_path): ''' returns a string containing git branch, commit id and commit date ''' result = None if os.path.exists(repo_path): # Check if the .git is a file. If it is a file, it means that we are in a submodule structure. if os.path.isfile(repo_path): try: gitdir = yaml.safe_load(open(repo_path)).get('gitdir') # There is a possibility the .git file to have an absolute path. 
if os.path.isabs(gitdir): repo_path = gitdir else: repo_path = os.path.join(repo_path[:-4], gitdir) except (IOError, AttributeError): return '' f = open(os.path.join(repo_path, "HEAD")) line = f.readline().rstrip("\n") if line.startswith("ref:"): branch_path = os.path.join(repo_path, line[5:]) else: branch_path = None f.close() if branch_path and os.path.exists(branch_path): branch = '/'.join(line.split('/')[2:]) f = open(branch_path) commit = f.readline()[:10] f.close() else: # detached HEAD commit = line[:10] branch = 'detached HEAD' branch_path = os.path.join(repo_path, "HEAD") date = time.localtime(os.stat(branch_path).st_mtime) if time.daylight == 0: offset = time.timezone else: offset = time.altzone result = "({0} {1}) last updated {2} (GMT {3:+04d})".format(branch, commit, time.strftime("%Y/%m/%d %H:%M:%S", date), int(offset / -36)) else: result = '' return result @staticmethod def _gitinfo(): basedir = os.path.join(os.path.dirname(__file__), '..', '..', '..') repo_path = os.path.join(basedir, '.git') result = CLI._git_repo_info(repo_path) submodules = os.path.join(basedir, '.gitmodules') if not os.path.exists(submodules): return result f = open(submodules) for line in f: tokens = line.strip().split(' ') if tokens[0] == 'path': submodule_path = tokens[2] submodule_info = CLI._git_repo_info(os.path.join(basedir, submodule_path, '.git')) if not submodule_info: submodule_info = ' not found - use git submodule update --init ' + submodule_path result += "\n {0}: {1}".format(submodule_path, submodule_info) f.close() return result def pager(self, text): ''' find reasonable way to display text ''' # this is a much simpler form of what is in pydoc.py if not sys.stdout.isatty(): display.display(text, screen_only=True) elif 'PAGER' in os.environ: if sys.platform == 'win32': display.display(text, screen_only=True) else: self.pager_pipe(text, os.environ['PAGER']) else: p = subprocess.Popen('less --version', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) p.communicate() if p.returncode == 0: self.pager_pipe(text, 'less') else: display.display(text, screen_only=True) @staticmethod def pager_pipe(text, cmd): ''' pipe text through a pager ''' if 'LESS' not in os.environ: os.environ['LESS'] = CLI.LESS_OPTS try: cmd = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=sys.stdout) cmd.communicate(input=to_bytes(text)) except IOError: pass except KeyboardInterrupt: pass @classmethod def tty_ify(cls, text): t = cls._ITALIC.sub("`" + r"\1" + "'", text) # I(word) => `word' t = cls._BOLD.sub("*" + r"\1" + "*", t) # B(word) => *word* t = cls._MODULE.sub("[" + r"\1" + "]", t) # M(word) => [word] t = cls._URL.sub(r"\1", t) # U(word) => word t = cls._CONST.sub("`" + r"\1" + "'", t) # C(word) => `word' return t @staticmethod def _play_prereqs(options): # all needs loader loader = DataLoader() basedir = getattr(options, 'basedir', False) if basedir: loader.set_basedir(basedir) vault_ids = options.vault_ids default_vault_ids = C.DEFAULT_VAULT_IDENTITY_LIST vault_ids = default_vault_ids + vault_ids vault_secrets = CLI.setup_vault_secrets(loader, vault_ids=vault_ids, vault_password_files=options.vault_password_files, ask_vault_pass=options.ask_vault_pass, auto_prompt=False) loader.set_vault_secrets(vault_secrets) # create the inventory, and filter it based on the subset specified (if any) inventory = InventoryManager(loader=loader, sources=options.inventory) # create the variable manager, which will be shared throughout # the code, ensuring a consistent view of global variables 
variable_manager = VariableManager(loader=loader, inventory=inventory) # load vars from cli options variable_manager.extra_vars = load_extra_vars(loader=loader, options=options) variable_manager.options_vars = load_options_vars(options, CLI.version_info(gitinfo=False)) return loader, inventory, variable_manager @staticmethod def get_host_list(inventory, subset, pattern='all'): no_hosts = False if len(inventory.list_hosts()) == 0: # Empty inventory display.warning("provided hosts list is empty, only localhost is available. Note that the implicit localhost does not match 'all'") no_hosts = True inventory.subset(subset) hosts = inventory.list_hosts(pattern) if len(hosts) == 0 and no_hosts is False: raise AnsibleError("Specified hosts and/or --limit does not match any hosts") return hosts ansible-2.5.1/lib/ansible/cli/adhoc.py0000644000000000000000000001562013265756155017477 0ustar rootroot00000000000000# (c) 2012, Michael DeHaan # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . from __future__ import (absolute_import, division, print_function) __metaclass__ = type ######################################################## import os from ansible import constants as C from ansible.cli import CLI from ansible.errors import AnsibleError, AnsibleOptionsError from ansible.executor.task_queue_manager import TaskQueueManager from ansible.module_utils._text import to_text from ansible.parsing.splitter import parse_kv from ansible.playbook import Playbook from ansible.playbook.play import Play from ansible.plugins.loader import get_all_plugin_loaders try: from __main__ import display except ImportError: from ansible.utils.display import Display display = Display() ######################################################## class AdHocCLI(CLI): ''' is an extra-simple tool/framework/API for doing 'remote things'. 
this command allows you to define and run a single task 'playbook' against a set of hosts ''' def parse(self): ''' create an options parser for bin/ansible ''' self.parser = CLI.base_parser( usage='%prog [options]', runas_opts=True, inventory_opts=True, async_opts=True, output_opts=True, connect_opts=True, check_opts=True, runtask_opts=True, vault_opts=True, fork_opts=True, module_opts=True, basedir_opts=True, desc="Define and run a single task 'playbook' against a set of hosts", epilog="Some modules do not make sense in Ad-Hoc (include, meta, etc)", ) # options unique to ansible ad-hoc self.parser.add_option('-a', '--args', dest='module_args', help="module arguments", default=C.DEFAULT_MODULE_ARGS) self.parser.add_option('-m', '--module-name', dest='module_name', help="module name to execute (default=%s)" % C.DEFAULT_MODULE_NAME, default=C.DEFAULT_MODULE_NAME) super(AdHocCLI, self).parse() if len(self.args) < 1: raise AnsibleOptionsError("Missing target hosts") elif len(self.args) > 1: raise AnsibleOptionsError("Extraneous options or arguments") display.verbosity = self.options.verbosity self.validate_conflicts(runas_opts=True, vault_opts=True, fork_opts=True) def _play_ds(self, pattern, async_val, poll): check_raw = self.options.module_name in ('command', 'win_command', 'shell', 'win_shell', 'script', 'raw') return dict( name="Ansible Ad-Hoc", hosts=pattern, gather_facts='no', tasks=[dict(action=dict(module=self.options.module_name, args=parse_kv(self.options.module_args, check_raw=check_raw)), async_val=async_val, poll=poll)] ) def run(self): ''' create and execute the single task playbook ''' super(AdHocCLI, self).run() # only thing left should be host pattern pattern = to_text(self.args[0], errors='surrogate_or_strict') sshpass = None becomepass = None self.normalize_become_options() (sshpass, becomepass) = self.ask_passwords() passwords = {'conn_pass': sshpass, 'become_pass': becomepass} # dynamically load any plugins get_all_plugin_loaders() loader, inventory, variable_manager = self._play_prereqs(self.options) try: hosts = CLI.get_host_list(inventory, self.options.subset, pattern) except AnsibleError: if self.options.subset: raise else: hosts = [] display.warning("No hosts matched, nothing to do") if self.options.listhosts: display.display(' hosts (%d):' % len(hosts)) for host in hosts: display.display(' %s' % host) return 0 if self.options.module_name in C.MODULE_REQUIRE_ARGS and not self.options.module_args: err = "No argument passed to %s module" % self.options.module_name if pattern.endswith(".yml"): err = err + ' (did you mean to run ansible-playbook?)' raise AnsibleOptionsError(err) # Avoid modules that don't work with ad-hoc if self.options.module_name.startswith(('include', 'import_')): raise AnsibleOptionsError("'%s' is not a valid action for ad-hoc commands" % self.options.module_name) play_ds = self._play_ds(pattern, self.options.seconds, self.options.poll_interval) play = Play().load(play_ds, variable_manager=variable_manager, loader=loader) # used in start callback playbook = Playbook(loader) playbook._entries.append(play) playbook._file_name = '__adhoc_playbook__' if self.callback: cb = self.callback elif self.options.one_line: cb = 'oneline' # Respect custom 'stdout_callback' only with enabled 'bin_ansible_callbacks' elif C.DEFAULT_LOAD_CALLBACK_PLUGINS and C.DEFAULT_STDOUT_CALLBACK != 'default': cb = C.DEFAULT_STDOUT_CALLBACK else: cb = 'minimal' run_tree = False if self.options.tree: C.DEFAULT_CALLBACK_WHITELIST.append('tree') C.TREE_DIR = self.options.tree 
run_tree = True # now create a task queue manager to execute the play self._tqm = None try: self._tqm = TaskQueueManager( inventory=inventory, variable_manager=variable_manager, loader=loader, options=self.options, passwords=passwords, stdout_callback=cb, run_additional_callbacks=C.DEFAULT_LOAD_CALLBACK_PLUGINS, run_tree=run_tree, ) self._tqm.send_callback('v2_playbook_on_start', playbook) result = self._tqm.run(play) self._tqm.send_callback('v2_playbook_on_stats', self._tqm._stats) finally: if self._tqm: self._tqm.cleanup() if loader: loader.cleanup_all_tmp_files() return result ansible-2.5.1/lib/ansible/cli/config.py0000644000000000000000000001506113265756155017665 0ustar rootroot00000000000000# Copyright: (c) 2017, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os import shlex import subprocess import sys import yaml from ansible.cli import CLI from ansible.config.manager import ConfigManager, Setting, find_ini_config_file from ansible.errors import AnsibleError, AnsibleOptionsError from ansible.module_utils._text import to_native, to_text, to_bytes from ansible.parsing.yaml.dumper import AnsibleDumper from ansible.utils.color import stringc from ansible.utils.path import unfrackpath try: from __main__ import display except ImportError: from ansible.utils.display import Display display = Display() class ConfigCLI(CLI): """ Config command line class """ VALID_ACTIONS = ("view", "dump", "list") # TODO: edit, update, search def __init__(self, args, callback=None): self.config_file = None self.config = None super(ConfigCLI, self).__init__(args, callback) def parse(self): self.parser = CLI.base_parser( usage="usage: %%prog [%s] [--help] [options] [ansible.cfg]" % "|".join(self.VALID_ACTIONS), epilog="\nSee '%s --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0]), desc="View, edit, and manage ansible configuration.", ) self.parser.add_option('-c', '--config', dest='config_file', help="path to configuration file, defaults to first file found in precedence.") self.set_action() # options specific to self.actions if self.action == "list": self.parser.set_usage("usage: %prog list [options] ") if self.action == "dump": self.parser.add_option('--only-changed', dest='only_changed', action='store_true', help="Only show configurations that have changed from the default") elif self.action == "update": self.parser.add_option('-s', '--setting', dest='setting', help="config setting, the section defaults to 'defaults'") self.parser.set_usage("usage: %prog update [options] [-c ansible.cfg] -s '[section.]setting=value'") elif self.action == "search": self.parser.set_usage("usage: %prog update [options] [-c ansible.cfg] ") self.options, self.args = self.parser.parse_args() display.verbosity = self.options.verbosity def run(self): super(ConfigCLI, self).run() if self.options.config_file: self.config_file = unfrackpath(self.options.config_file, follow=False) self.config = ConfigManager(self.config_file) else: self.config = ConfigManager() self.config_file = find_ini_config_file() if self.config_file: try: if not os.path.exists(self.config_file): raise AnsibleOptionsError("%s does not exist or is not accessible" % (self.config_file)) elif not os.path.isfile(self.config_file): raise AnsibleOptionsError("%s is not a valid file" % (self.config_file)) os.environ['ANSIBLE_CONFIG'] = to_native(self.config_file) except: if 
self.action in ['view']: raise elif self.action in ['edit', 'update']: display.warning("File does not exist, used empty file: %s" % self.config_file) elif self.action == 'view': raise AnsibleError('Invalid or no config file was supplied') self.execute() def execute_update(self): ''' Updates a single setting in the specified ansible.cfg ''' raise AnsibleError("Option not implemented yet") # pylint: disable=unreachable if self.options.setting is None: raise AnsibleOptionsError("update option requries a setting to update") (entry, value) = self.options.setting.split('=') if '.' in entry: (section, option) = entry.split('.') else: section = 'defaults' option = entry subprocess.call([ 'ansible', '-m', 'ini_file', 'localhost', '-c', 'local', '-a', '"dest=%s section=%s option=%s value=%s backup=yes"' % (self.config_file, section, option, value) ]) def execute_view(self): ''' Displays the current config file ''' try: with open(self.config_file, 'rb') as f: self.pager(to_text(f.read(), errors='surrogate_or_strict')) except Exception as e: raise AnsibleError("Failed to open config file: %s" % to_native(e)) def execute_edit(self): ''' Opens ansible.cfg in the default EDITOR ''' raise AnsibleError("Option not implemented yet") # pylint: disable=unreachable try: editor = shlex.split(os.environ.get('EDITOR', 'vi')) editor.append(self.config_file) subprocess.call(editor) except Exception as e: raise AnsibleError("Failed to open editor: %s" % to_native(e)) def execute_list(self): ''' list all current configs reading lib/constants.py and shows env and config file setting names ''' self.pager(to_text(yaml.dump(self.config.get_configuration_definitions(), Dumper=AnsibleDumper), errors='surrogate_or_strict')) def execute_dump(self): ''' Shows the current settings, merges ansible.cfg if specified ''' # FIXME: deal with plugins, not just base config text = [] defaults = self.config.get_configuration_definitions().copy() for setting in self.config.data.get_settings(): if setting.name in defaults: defaults[setting.name] = setting for setting in sorted(defaults): if isinstance(defaults[setting], Setting): if defaults[setting].origin == 'default': color = 'green' else: color = 'yellow' msg = "%s(%s) = %s" % (setting, defaults[setting].origin, defaults[setting].value) else: color = 'green' msg = "%s(%s) = %s" % (setting, 'default', defaults[setting].get('default')) if not self.options.only_changed or color == 'yellow': text.append(stringc(msg, color)) self.pager(to_text('\n'.join(text), errors='surrogate_or_strict')) ansible-2.5.1/lib/ansible/cli/console.py0000644000000000000000000003632413265756155020067 0ustar rootroot00000000000000# (c) 2014, Nandor Sivok # (c) 2016, Redhat Inc # # ansible-console is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # ansible-console is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
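# Example session (illustrative; host counts depend on your inventory). The
# prompt format user@pattern (selected host count)[f:forks] comes from
# set_prompt() below:
#
#   $ ansible-console -i hosts webservers
#   admin@webservers (4)[f:5]$ ping
#   admin@webservers (4)[f:5]$ cd webservers:&staging
#   admin@webservers:&staging (2)[f:5]$ !uptime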
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

########################################################
# ansible-console is an interactive REPL shell for ansible
# with built-in tab completion for all the documented modules
#
# Available commands:
#  cd - change host/group (you can use host patterns eg.: app*.dc*:!app01*)
#  list - list available hosts in the current path
#  forks - change fork
#  become - become
#  ! - forces shell module instead of the ansible module (!yum update -y)

import atexit
import cmd
import getpass
import readline
import os
import sys

from ansible import constants as C
from ansible.cli import CLI
from ansible.errors import AnsibleError
from ansible.executor.task_queue_manager import TaskQueueManager
from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.parsing.splitter import parse_kv
from ansible.playbook.play import Play
from ansible.plugins.loader import module_loader, fragment_loader
from ansible.utils import plugin_docs
from ansible.utils.color import stringc

try:
    from __main__ import display
except ImportError:
    from ansible.utils.display import Display
    display = Display()


class ConsoleCLI(CLI, cmd.Cmd):
    ''' a REPL that allows for running ad-hoc tasks against a chosen inventory (based on dominis' ansible-shell).'''

    modules = []
    ARGUMENTS = {'host-pattern': 'A name of a group in the inventory, a shell-like glob '
                                 'selecting hosts in inventory or any combination of the two separated by commas.'}

    def __init__(self, args):

        super(ConsoleCLI, self).__init__(args)

        self.intro = 'Welcome to the ansible console.\nType help or ? to list commands.\n'

        self.groups = []
        self.hosts = []
        self.pattern = None
        self.variable_manager = None
        self.loader = None
        self.passwords = dict()

        self.modules = None
        cmd.Cmd.__init__(self)

    def parse(self):
        self.parser = CLI.base_parser(
            usage='%prog [<host-pattern>] [options]',
            runas_opts=True,
            inventory_opts=True,
            connect_opts=True,
            check_opts=True,
            vault_opts=True,
            fork_opts=True,
            module_opts=True,
            basedir_opts=True,
            desc="REPL console for executing Ansible tasks.",
            epilog="This is not a live session/connection, each task executes in the background and returns its results."
) # options unique to shell self.parser.add_option('--step', dest='step', action='store_true', help="one-step-at-a-time: confirm each task before running") self.parser.set_defaults(cwd='*') super(ConsoleCLI, self).parse() display.verbosity = self.options.verbosity self.validate_conflicts(runas_opts=True, vault_opts=True, fork_opts=True) def get_names(self): return dir(self) def cmdloop(self): try: cmd.Cmd.cmdloop(self) except KeyboardInterrupt: self.do_exit(self) def set_prompt(self): login_user = self.options.remote_user or getpass.getuser() self.selected = self.inventory.list_hosts(self.options.cwd) prompt = "%s@%s (%d)[f:%s]" % (login_user, self.options.cwd, len(self.selected), self.options.forks) if self.options.become and self.options.become_user in [None, 'root']: prompt += "# " color = C.COLOR_ERROR else: prompt += "$ " color = C.COLOR_HIGHLIGHT self.prompt = stringc(prompt, color) def list_modules(self): modules = set() if self.options.module_path: for path in self.options.module_path: if path: module_loader.add_directory(path) module_paths = module_loader._get_paths() for path in module_paths: if path is not None: modules.update(self._find_modules_in_path(path)) return modules def _find_modules_in_path(self, path): if os.path.isdir(path): for module in os.listdir(path): if module.startswith('.'): continue elif os.path.isdir(module): self._find_modules_in_path(module) elif module.startswith('__'): continue elif any(module.endswith(x) for x in C.BLACKLIST_EXTS): continue elif module in C.IGNORE_FILES: continue elif module.startswith('_'): fullpath = '/'.join([path, module]) if os.path.islink(fullpath): # avoids aliases continue module = module.replace('_', '', 1) module = os.path.splitext(module)[0] # removes the extension yield module def default(self, arg, forceshell=False): """ actually runs modules """ if arg.startswith("#"): return False if not self.options.cwd: display.error("No host found") return False if arg.split()[0] in self.modules: module = arg.split()[0] module_args = ' '.join(arg.split()[1:]) else: module = 'shell' module_args = arg if forceshell is True: module = 'shell' module_args = arg self.options.module_name = module result = None try: check_raw = self.options.module_name in ('command', 'shell', 'script', 'raw') play_ds = dict( name="Ansible Shell", hosts=self.options.cwd, gather_facts='no', tasks=[dict(action=dict(module=module, args=parse_kv(module_args, check_raw=check_raw)))] ) play = Play().load(play_ds, variable_manager=self.variable_manager, loader=self.loader) except Exception as e: display.error(u"Unable to build command: %s" % to_text(e)) return False try: cb = 'minimal' # FIXME: make callbacks configurable # now create a task queue manager to execute the play self._tqm = None try: self._tqm = TaskQueueManager( inventory=self.inventory, variable_manager=self.variable_manager, loader=self.loader, options=self.options, passwords=self.passwords, stdout_callback=cb, run_additional_callbacks=C.DEFAULT_LOAD_CALLBACK_PLUGINS, run_tree=False, ) result = self._tqm.run(play) finally: if self._tqm: self._tqm.cleanup() if self.loader: self.loader.cleanup_all_tmp_files() if result is None: display.error("No hosts found") return False except KeyboardInterrupt: display.error('User interrupted execution') return False except Exception as e: display.error(to_text(e)) # FIXME: add traceback in very very verbose mode return False def emptyline(self): return def do_shell(self, arg): """ You can run shell commands through the shell module. 
eg.: shell ps uax | grep java | wc -l shell killall python shell halt -n You can use the ! to force the shell module. eg.: !ps aux | grep java | wc -l """ self.default(arg, True) def do_forks(self, arg): """Set the number of forks""" if not arg: display.display('Usage: forks ') return self.options.forks = int(arg) self.set_prompt() do_serial = do_forks def do_verbosity(self, arg): """Set verbosity level""" if not arg: display.display('Usage: verbosity ') else: display.verbosity = int(arg) display.v('verbosity level set to %s' % arg) def do_cd(self, arg): """ Change active host/group. You can use hosts patterns as well eg.: cd webservers cd webservers:dbservers cd webservers:!phoenix cd webservers:&staging cd webservers:dbservers:&staging:!phoenix """ if not arg: self.options.cwd = '*' elif arg in '/*': self.options.cwd = 'all' elif self.inventory.get_hosts(arg): self.options.cwd = arg else: display.display("no host matched") self.set_prompt() def do_list(self, arg): """List the hosts in the current group""" if arg == 'groups': for group in self.groups: display.display(group) else: for host in self.selected: display.display(host.name) def do_become(self, arg): """Toggle whether plays run with become""" if arg: self.options.become = boolean(arg, strict=False) display.v("become changed to %s" % self.options.become) self.set_prompt() else: display.display("Please specify become value, e.g. `become yes`") def do_remote_user(self, arg): """Given a username, set the remote user plays are run by""" if arg: self.options.remote_user = arg self.set_prompt() else: display.display("Please specify a remote user, e.g. `remote_user root`") def do_become_user(self, arg): """Given a username, set the user that plays are run by when using become""" if arg: self.options.become_user = arg else: display.display("Please specify a user, e.g. `become_user jenkins`") display.v("Current user is %s" % self.options.become_user) self.set_prompt() def do_become_method(self, arg): """Given a become_method, set the privilege escalation method when using become""" if arg: self.options.become_method = arg display.v("become_method changed to %s" % self.options.become_method) else: display.display("Please specify a become_method, e.g. `become_method su`") def do_check(self, arg): """Toggle whether plays run with check mode""" if arg: self.options.check = boolean(arg, strict=False) display.v("check mode changed to %s" % self.options.check) else: display.display("Please specify check mode value, e.g. `check yes`") def do_diff(self, arg): """Toggle whether plays run with diff""" if arg: self.options.diff = boolean(arg, strict=False) display.v("diff mode changed to %s" % self.options.diff) else: display.display("Please specify a diff value , e.g. `diff yes`") def do_exit(self, args): """Exits from the console""" sys.stdout.write('\n') return -1 do_EOF = do_exit def helpdefault(self, module_name): if module_name in self.modules: in_path = module_loader.find_plugin(module_name) if in_path: oc, a, _, _ = plugin_docs.get_docstring(in_path, fragment_loader) if oc: display.display(oc['short_description']) display.display('Parameters:') for opt in oc['options'].keys(): display.display(' ' + stringc(opt, C.COLOR_HIGHLIGHT) + ' ' + oc['options'][opt]['description'][0]) else: display.error('No documentation found for %s.' % module_name) else: display.error('%s is not a valid command, use ? to list all valid commands.' 
% module_name) def complete_cd(self, text, line, begidx, endidx): mline = line.partition(' ')[2] offs = len(mline) - len(text) if self.options.cwd in ('all', '*', '\\'): completions = self.hosts + self.groups else: completions = [x.name for x in self.inventory.list_hosts(self.options.cwd)] return [to_native(s)[offs:] for s in completions if to_native(s).startswith(to_native(mline))] def completedefault(self, text, line, begidx, endidx): if line.split()[0] in self.modules: mline = line.split(' ')[-1] offs = len(mline) - len(text) completions = self.module_args(line.split()[0]) return [s[offs:] + '=' for s in completions if s.startswith(mline)] def module_args(self, module_name): in_path = module_loader.find_plugin(module_name) oc, a, _, _ = plugin_docs.get_docstring(in_path, fragment_loader) return list(oc['options'].keys()) def run(self): super(ConsoleCLI, self).run() sshpass = None becomepass = None # hosts if len(self.args) != 1: self.pattern = 'all' else: self.pattern = self.args[0] self.options.cwd = self.pattern # dynamically add modules as commands self.modules = self.list_modules() for module in self.modules: setattr(self, 'do_' + module, lambda arg, module=module: self.default(module + ' ' + arg)) setattr(self, 'help_' + module, lambda module=module: self.helpdefault(module)) self.normalize_become_options() (sshpass, becomepass) = self.ask_passwords() self.passwords = {'conn_pass': sshpass, 'become_pass': becomepass} self.loader, self.inventory, self.variable_manager = self._play_prereqs(self.options) hosts = CLI.get_host_list(self.inventory, self.options.subset, self.pattern) self.groups = self.inventory.list_groups() self.hosts = [x.name for x in hosts] # This hack is to work around readline issues on a mac: # http://stackoverflow.com/a/7116997/541202 if 'libedit' in readline.__doc__: readline.parse_and_bind("bind ^I rl_complete") else: readline.parse_and_bind("tab: complete") histfile = os.path.join(os.path.expanduser("~"), ".ansible-console_history") try: readline.read_history_file(histfile) except IOError: pass atexit.register(readline.write_history_file, histfile) self.set_prompt() self.cmdloop() ansible-2.5.1/lib/ansible/cli/doc.py0000644000000000000000000005373613265756155017200 0ustar rootroot00000000000000# (c) 2014, James Tanner # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
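# Typical invocations (illustrative), using the options defined below:
#
#   ansible-doc -l                # list available modules with short descriptions
#   ansible-doc copy              # print the DOCUMENTATION for the 'copy' module
#   ansible-doc -s copy           # emit a playbook snippet for 'copy'
#   ansible-doc -t connection -l  # list plugins of another documentable type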
from __future__ import (absolute_import, division, print_function) __metaclass__ = type import datetime import os import textwrap import traceback import yaml from ansible import constants as C from ansible.cli import CLI from ansible.errors import AnsibleError, AnsibleOptionsError from ansible.module_utils._text import to_native from ansible.module_utils.six import string_types from ansible.parsing.yaml.dumper import AnsibleDumper from ansible.plugins.loader import module_loader, action_loader, lookup_loader, callback_loader, cache_loader, \ vars_loader, connection_loader, strategy_loader, inventory_loader, shell_loader, fragment_loader from ansible.utils.plugin_docs import BLACKLIST, get_docstring try: from __main__ import display except ImportError: from ansible.utils.display import Display display = Display() class DocCLI(CLI): ''' displays information on modules installed in Ansible libraries. It displays a terse listing of plugins and their short descriptions, provides a printout of their DOCUMENTATION strings, and it can create a short "snippet" which can be pasted into a playbook. ''' # default ignore list for detailed views IGNORE = ('module', 'docuri', 'version_added', 'short_description', 'now_date', 'plainexamples', 'returndocs') def __init__(self, args): super(DocCLI, self).__init__(args) self.plugin_list = set() def parse(self): self.parser = CLI.base_parser( usage='usage: %prog [-l|-F|-s] [options] [-t ] [plugin]', module_opts=True, desc="plugin documentation tool", epilog="See man pages for Ansible CLI options or website for tutorials https://docs.ansible.com" ) self.parser.add_option("-F", "--list_files", action="store_true", default=False, dest="list_files", help='Show plugin names and their source files without summaries (implies --list)') self.parser.add_option("-l", "--list", action="store_true", default=False, dest='list_dir', help='List available plugins') self.parser.add_option("-s", "--snippet", action="store_true", default=False, dest='show_snippet', help='Show playbook snippet for specified plugin(s)') self.parser.add_option("-a", "--all", action="store_true", default=False, dest='all_plugins', help='**For internal testing only** Show documentation for all plugins.') self.parser.add_option("-t", "--type", action="store", default='module', dest='type', type='choice', help='Choose which plugin type (defaults to "module")', choices=C.DOCUMENTABLE_PLUGINS) super(DocCLI, self).parse() if [self.options.all_plugins, self.options.list_dir, self.options.list_files, self.options.show_snippet].count(True) > 1: raise AnsibleOptionsError("Only one of -l, -F, -s or -a can be used at the same time.") display.verbosity = self.options.verbosity def run(self): super(DocCLI, self).run() plugin_type = self.options.type # choose plugin type if plugin_type == 'cache': loader = cache_loader elif plugin_type == 'callback': loader = callback_loader elif plugin_type == 'connection': loader = connection_loader elif plugin_type == 'lookup': loader = lookup_loader elif plugin_type == 'strategy': loader = strategy_loader elif plugin_type == 'vars': loader = vars_loader elif plugin_type == 'inventory': loader = inventory_loader elif plugin_type == 'shell': loader = shell_loader else: loader = module_loader # add to plugin path from command line if self.options.module_path: for path in self.options.module_path: if path: loader.add_directory(path) # save only top level paths for errors search_paths = DocCLI.print_paths(loader) loader._paths = None # reset so we can use subdirs below # list 
plugins names and filepath for type if self.options.list_files: paths = loader._get_paths() for path in paths: self.find_plugins(path, plugin_type) list_text = self.get_plugin_list_filenames(loader) self.pager(list_text) return 0 # list plugins for type if self.options.list_dir: paths = loader._get_paths() for path in paths: self.find_plugins(path, plugin_type) self.pager(self.get_plugin_list_text(loader)) return 0 # process all plugins of type if self.options.all_plugins: paths = loader._get_paths() for path in paths: self.find_plugins(path, plugin_type) self.args = sorted(set(self.plugin_list)) if len(self.args) == 0: raise AnsibleOptionsError("Incorrect options passed") # process command line list text = '' for plugin in self.args: try: # if the plugin lives in a non-python file (eg, win_X.ps1), require the corresponding python file for docs filename = loader.find_plugin(plugin, mod_type='.py', ignore_deprecated=True, check_aliases=True) if filename is None: display.warning("%s %s not found in:\n%s\n" % (plugin_type, plugin, search_paths)) continue if any(filename.endswith(x) for x in C.BLACKLIST_EXTS): continue try: doc, plainexamples, returndocs, metadata = get_docstring(filename, fragment_loader, verbose=(self.options.verbosity > 0)) except: display.vvv(traceback.format_exc()) display.error("%s %s has a documentation error formatting or is missing documentation." % (plugin_type, plugin), wrap_text=False) continue if doc is not None: # assign from other sections doc['plainexamples'] = plainexamples doc['returndocs'] = returndocs doc['metadata'] = metadata # generate extra data if plugin_type == 'module': # is there corresponding action plugin? if plugin in action_loader: doc['action'] = True else: doc['action'] = False doc['filename'] = filename doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d') if 'docuri' in doc: doc['docuri'] = doc[plugin_type].replace('_', '-') if self.options.show_snippet and plugin_type == 'module': text += self.get_snippet_text(doc) else: text += self.get_man_text(doc) else: # this typically means we couldn't even parse the docstring, not just that the YAML is busted, # probably a quoting issue. 
raise AnsibleError("Parsing produced an empty object.") except Exception as e: display.vvv(traceback.format_exc()) raise AnsibleError("%s %s missing documentation (or could not parse documentation): %s\n" % (plugin_type, plugin, str(e))) if text: self.pager(text) return 0 def find_plugins(self, path, ptype): display.vvvv("Searching %s for plugins" % path) if not os.path.exists(path): display.vvvv("%s does not exist" % path) return bkey = ptype.upper() for plugin in os.listdir(path): display.vvvv("Found %s" % plugin) full_path = '/'.join([path, plugin]) if plugin.startswith('.'): continue elif os.path.isdir(full_path): continue elif any(plugin.endswith(x) for x in C.BLACKLIST_EXTS): continue elif plugin.startswith('__'): continue elif plugin in C.IGNORE_FILES: continue elif plugin .startswith('_'): if os.path.islink(full_path): # avoids aliases continue plugin = os.path.splitext(plugin)[0] # removes the extension plugin = plugin.lstrip('_') # remove underscore from deprecated plugins if plugin not in BLACKLIST.get(bkey, ()): self.plugin_list.add(plugin) display.vvvv("Added %s" % plugin) def get_plugin_list_text(self, loader): columns = display.columns displace = max(len(x) for x in self.plugin_list) linelimit = columns - displace - 5 text = [] deprecated = [] for plugin in sorted(self.plugin_list): try: # if the module lives in a non-python file (eg, win_X.ps1), require the corresponding python file for docs filename = loader.find_plugin(plugin, mod_type='.py', ignore_deprecated=True, check_aliases=True) if filename is None: continue if filename.endswith(".ps1"): continue if os.path.isdir(filename): continue doc = None try: doc, plainexamples, returndocs, metadata = get_docstring(filename, fragment_loader) except: display.warning("%s has a documentation formatting error" % plugin) if not doc or not isinstance(doc, dict): desc = 'UNDOCUMENTED' display.warning("%s parsing did not produce documentation." % plugin) else: desc = self.tty_ify(doc.get('short_description', 'INVALID SHORT DESCRIPTION').strip()) if len(desc) > linelimit: desc = desc[:linelimit] + '...' 
                if plugin.startswith('_'):  # Handle deprecated
                    deprecated.append("%-*s %-*.*s" % (displace, plugin[1:], linelimit, len(desc), desc))
                else:
                    text.append("%-*s %-*.*s" % (displace, plugin, linelimit, len(desc), desc))
            except Exception as e:
                raise AnsibleError("Failed reading docs at %s: %s" % (plugin, to_native(e)))

        if len(deprecated) > 0:
            text.append("\nDEPRECATED:")
            text.extend(deprecated)

        return "\n".join(text)

    def get_plugin_list_filenames(self, loader):
        columns = display.columns
        displace = max(len(x) for x in self.plugin_list)
        linelimit = columns - displace - 5
        text = []

        for plugin in sorted(self.plugin_list):
            try:
                # if the module lives in a non-python file (eg, win_X.ps1), require the corresponding python file for docs
                filename = loader.find_plugin(plugin, mod_type='.py', ignore_deprecated=True, check_aliases=True)

                if filename is None:
                    continue
                if filename.endswith(".ps1"):
                    continue
                if os.path.isdir(filename):
                    continue

                text.append("%-*s %-*.*s" % (displace, plugin, linelimit, len(filename), filename))

            except Exception as e:
                raise AnsibleError("Failed reading docs at %s: %s" % (plugin, to_native(e)))

        return "\n".join(text)

    @staticmethod
    def print_paths(finder):
        ''' Returns a string suitable for printing of the search path '''

        # Uses a list to get the order right
        ret = []
        for i in finder._get_paths(subdirs=False):
            if i not in ret:
                ret.append(i)
        return os.pathsep.join(ret)

    def get_snippet_text(self, doc):
        text = []
        desc = CLI.tty_ify(doc['short_description'])
        text.append("- name: %s" % (desc))
        text.append(" %s:" % (doc['module']))
        pad = 31
        subdent = " " * pad
        limit = display.columns - pad

        for o in sorted(doc['options'].keys()):
            opt = doc['options'][o]
            if isinstance(opt['description'], string_types):
                desc = CLI.tty_ify(opt['description'])
            else:
                desc = CLI.tty_ify(" ".join(opt['description']))

            required = opt.get('required', False)
            if not isinstance(required, bool):
                raise AnsibleError("Incorrect value for 'Required', a boolean is needed: %s" % required)
            if required:
                desc = "(required) %s" % desc
            o = '%s:' % o
            text.append(" %-20s # %s" % (o, textwrap.fill(desc, limit, subsequent_indent=subdent)))
        text.append('')

        return "\n".join(text)

    def _dump_yaml(self, struct, indent):
        return CLI.tty_ify('\n'.join([indent + line for line in yaml.dump(struct, default_flow_style=False, Dumper=AnsibleDumper).split('\n')]))

    def add_fields(self, text, fields, limit, opt_indent):
        for o in sorted(fields):
            opt = fields[o]

            required = opt.pop('required', False)
            if not isinstance(required, bool):
                raise AnsibleError("Incorrect value for 'Required', a boolean is needed: %s" % required)
            if required:
                opt_leadin = "="
            else:
                opt_leadin = "-"

            text.append("%s %s" % (opt_leadin, o))

            if isinstance(opt['description'], list):
                for entry in opt['description']:
                    text.append(textwrap.fill(CLI.tty_ify(entry), limit, initial_indent=opt_indent, subsequent_indent=opt_indent))
            else:
                text.append(textwrap.fill(CLI.tty_ify(opt['description']), limit, initial_indent=opt_indent, subsequent_indent=opt_indent))
            del opt['description']

            aliases = ''
            if 'aliases' in opt:
                if len(opt['aliases']) > 0:
                    aliases = "(Aliases: " + ", ".join(str(i) for i in opt['aliases']) + ")"
                del opt['aliases']

            choices = ''
            if 'choices' in opt:
                if len(opt['choices']) > 0:
                    choices = "(Choices: " + ", ".join(str(i) for i in opt['choices']) + ")"
                del opt['choices']

            default = ''
            if 'default' in opt or not required:
                default = "[Default: %s" % str(opt.pop('default', '(null)')) + "]"

            text.append(textwrap.fill(CLI.tty_ify(aliases + choices + default), limit, initial_indent=opt_indent,
subsequent_indent=opt_indent)) if 'options' in opt: text.append("%soptions:\n" % opt_indent) self.add_fields(text, opt.pop('options'), limit, opt_indent + opt_indent) if 'spec' in opt: text.append("%sspec:\n" % opt_indent) self.add_fields(text, opt.pop('spec'), limit, opt_indent + opt_indent) conf = {} for config in ('env', 'ini', 'yaml', 'vars'): if config in opt and opt[config]: conf[config] = opt.pop(config) for ignore in self.IGNORE: for item in conf[config]: if ignore in item: del item[ignore] if conf: text.append(self._dump_yaml({'set_via': conf}, opt_indent)) for k in sorted(opt): if k.startswith('_'): continue if isinstance(opt[k], string_types): text.append('%s%s: %s' % (opt_indent, k, textwrap.fill(CLI.tty_ify(opt[k]), limit - (len(k) + 2), subsequent_indent=opt_indent))) elif isinstance(opt[k], (list, tuple)): text.append(CLI.tty_ify('%s%s: %s' % (opt_indent, k, ', '.join(opt[k])))) else: text.append(self._dump_yaml({k: opt[k]}, opt_indent)) text.append('') @staticmethod def get_support_block(doc): # Note: 'curated' is deprecated and not used in any of the modules we ship support_level_msg = {'core': 'The Ansible Core Team', 'network': 'The Ansible Network Team', 'certified': 'an Ansible Partner', 'community': 'The Ansible Community', 'curated': 'A Third Party', } if doc['metadata'].get('metadata_version') in ('1.0', '1.1'): return [" * This module is maintained by %s" % support_level_msg[doc['metadata']['supported_by']]] return [] @staticmethod def get_metadata_block(doc): text = [] if doc['metadata'].get('metadata_version') in ('1.0', '1.1'): text.append("METADATA:") text.append('\tSUPPORT LEVEL: %s' % doc['metadata']['supported_by']) for k in (m for m in doc['metadata'] if m not in ('version', 'metadata_version', 'supported_by')): if isinstance(k, list): text.append("\t%s: %s" % (k.capitalize(), ", ".join(doc['metadata'][k]))) else: text.append("\t%s: %s" % (k.capitalize(), doc['metadata'][k])) return text return [] def get_man_text(self, doc): self.IGNORE = self.IGNORE + (self.options.type,) opt_indent = " " text = [] pad = display.columns * 0.20 limit = max(display.columns - int(pad), 70) text.append("> %s (%s)\n" % (doc.get(self.options.type, doc.get('plugin_type')).upper(), doc.pop('filename'))) if isinstance(doc['description'], list): desc = " ".join(doc.pop('description')) else: desc = doc.pop('description') text.append("%s\n" % textwrap.fill(CLI.tty_ify(desc), limit, initial_indent=opt_indent, subsequent_indent=opt_indent)) if 'deprecated' in doc and doc['deprecated'] is not None and len(doc['deprecated']) > 0: text.append("DEPRECATED: \n") if isinstance(doc['deprecated'], dict): text.append("\tReason: %(why)s\n\tWill be removed in: Ansible %(removed_in)s\n\tAlternatives: %(alternative)s" % doc.pop('deprecated')) else: text.append("%s" % doc.pop('deprecated')) text.append("\n") try: support_block = self.get_support_block(doc) if support_block: text.extend(support_block) except: pass # FIXME: not suported by plugins if doc.pop('action', False): text.append(" * note: %s\n" % "This module has a corresponding action plugin.") if 'options' in doc and doc['options']: text.append("OPTIONS (= is mandatory):\n") self.add_fields(text, doc.pop('options'), limit, opt_indent) text.append('') if 'notes' in doc and doc['notes'] and len(doc['notes']) > 0: text.append("NOTES:") for note in doc['notes']: text.append(textwrap.fill(CLI.tty_ify(note), limit - 6, initial_indent=opt_indent[:-2] + "* ", subsequent_indent=opt_indent)) text.append('') del doc['notes'] if 'requirements' in doc 
and doc['requirements'] is not None and len(doc['requirements']) > 0: req = ", ".join(doc.pop('requirements')) text.append("REQUIREMENTS:%s\n" % textwrap.fill(CLI.tty_ify(req), limit - 16, initial_indent=" ", subsequent_indent=opt_indent)) # Generic handler for k in sorted(doc): if k in self.IGNORE or not doc[k]: continue if isinstance(doc[k], string_types): text.append('%s: %s' % (k.upper(), textwrap.fill(CLI.tty_ify(doc[k]), limit - (len(k) + 2), subsequent_indent=opt_indent))) elif isinstance(doc[k], (list, tuple)): text.append('%s: %s' % (k.upper(), ', '.join(doc[k]))) else: text.append(self._dump_yaml({k.upper(): doc[k]}, opt_indent)) del doc[k] text.append('') if 'plainexamples' in doc and doc['plainexamples'] is not None: text.append("EXAMPLES:") if isinstance(doc['plainexamples'], string_types): text.append(doc.pop('plainexamples').strip()) else: text.append(yaml.dump(doc.pop('plainexamples'), indent=2, default_flow_style=False)) text.append('') if 'returndocs' in doc and doc['returndocs'] is not None: text.append("RETURN VALUES:\n") if isinstance(doc['returndocs'], string_types): text.append(doc.pop('returndocs')) else: text.append(yaml.dump(doc.pop('returndocs'), indent=2, default_flow_style=False)) text.append('') try: metadata_block = self.get_metadata_block(doc) if metadata_block: text.extend(metadata_block) text.append('') except: pass # metadata is optional return "\n".join(text) ansible-2.5.1/lib/ansible/cli/galaxy.py0000644000000000000000000007647713265756155017727 0ustar rootroot00000000000000######################################################################## # # (C) 2013, James Cammarata # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
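# --- Editor's sketch (not part of the original source): DocCLI above renders
# option and module help with textwrap.fill(), using initial_indent and
# subsequent_indent so wrapped lines stay aligned under the option name. A
# standalone illustration of that wrapping pattern (the width and text are
# made-up example values):
import textwrap

opt_indent = " " * 8
limit = 60
desc = ("A longer option description that has to be wrapped to the terminal "
        "width while remaining aligned with the indented option block.")
print(textwrap.fill(desc, limit, initial_indent=opt_indent, subsequent_indent=opt_indent))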
# ######################################################################## from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os.path import re import shutil import sys import time import yaml from jinja2 import Environment, FileSystemLoader import ansible.constants as C from ansible.cli import CLI from ansible.errors import AnsibleError, AnsibleOptionsError from ansible.galaxy import Galaxy from ansible.galaxy.api import GalaxyAPI from ansible.galaxy.login import GalaxyLogin from ansible.galaxy.role import GalaxyRole from ansible.galaxy.token import GalaxyToken from ansible.module_utils._text import to_text from ansible.playbook.role.requirement import RoleRequirement try: from __main__ import display except ImportError: from ansible.utils.display import Display display = Display() class GalaxyCLI(CLI): '''command to manage Ansible roles in shared repostories, the default of which is Ansible Galaxy *https://galaxy.ansible.com*.''' SKIP_INFO_KEYS = ("name", "description", "readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url") VALID_ACTIONS = ("delete", "import", "info", "init", "install", "list", "login", "remove", "search", "setup") def __init__(self, args): self.api = None self.galaxy = None super(GalaxyCLI, self).__init__(args) def set_action(self): super(GalaxyCLI, self).set_action() # specific to actions if self.action == "delete": self.parser.set_usage("usage: %prog delete [options] github_user github_repo") elif self.action == "import": self.parser.set_usage("usage: %prog import [options] github_user github_repo") self.parser.add_option('--no-wait', dest='wait', action='store_false', default=True, help='Don\'t wait for import results.') self.parser.add_option('--branch', dest='reference', help='The name of a branch to import. Defaults to the repository\'s default branch (usually master)') self.parser.add_option('--role-name', dest='role_name', help='The name the role should have, if different than the repo name') self.parser.add_option('--status', dest='check_status', action='store_true', default=False, help='Check the status of the most recent import request for given github_user/github_repo.') elif self.action == "info": self.parser.set_usage("usage: %prog info [options] role_name[,version]") elif self.action == "init": self.parser.set_usage("usage: %prog init [options] role_name") self.parser.add_option('--init-path', dest='init_path', default="./", help='The path in which the skeleton role will be created. 
The default is the current working directory.') self.parser.add_option('--container-enabled', dest='container_enabled', action='store_true', default=False, help='Initialize the skeleton role with default contents for a Container Enabled role.') self.parser.add_option('--role-skeleton', dest='role_skeleton', default=C.GALAXY_ROLE_SKELETON, help='The path to a role skeleton that the new role should be based upon.') elif self.action == "install": self.parser.set_usage("usage: %prog install [options] [-r FILE | role_name(s)[,version] | scm+role_repo_url[,version] | tar_file(s)]") self.parser.add_option('-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False, help='Ignore errors and continue with the next specified role.') self.parser.add_option('-n', '--no-deps', dest='no_deps', action='store_true', default=False, help='Don\'t download roles listed as dependencies') self.parser.add_option('-r', '--role-file', dest='role_file', help='A file containing a list of roles to be imported') elif self.action == "remove": self.parser.set_usage("usage: %prog remove role1 role2 ...") elif self.action == "list": self.parser.set_usage("usage: %prog list [role_name]") elif self.action == "login": self.parser.set_usage("usage: %prog login [options]") self.parser.add_option('--github-token', dest='token', default=None, help='Identify with github token rather than username and password.') elif self.action == "search": self.parser.set_usage("usage: %prog search [searchterm1 searchterm2] [--galaxy-tags galaxy_tag1,galaxy_tag2] [--platforms platform1,platform2] " "[--author username]") self.parser.add_option('--platforms', dest='platforms', help='list of OS platforms to filter by') self.parser.add_option('--galaxy-tags', dest='galaxy_tags', help='list of galaxy tags to filter by') self.parser.add_option('--author', dest='author', help='GitHub username') elif self.action == "setup": self.parser.set_usage("usage: %prog setup [options] source github_user github_repo secret") self.parser.add_option('--remove', dest='remove_id', default=None, help='Remove the integration matching the provided ID value. Use --list to see ID values.') self.parser.add_option('--list', dest="setup_list", action='store_true', default=False, help='List all of your integrations.') # options that apply to more than one action if self.action in ['init', 'info']: self.parser.add_option('--offline', dest='offline', default=False, action='store_true', help="Don't query the galaxy API when creating roles") if self.action not in ("delete", "import", "init", "login", "setup"): # NOTE: while the option type=str, the default is a list, and the # callback will set the value to a list. self.parser.add_option('-p', '--roles-path', dest='roles_path', action="callback", callback=CLI.unfrack_paths, default=C.DEFAULT_ROLES_PATH, help='The path to the directory containing your roles. The default is the roles_path configured in your ansible.cfg' 'file (/etc/ansible/roles if not configured)', type='str') if self.action in ("init", "install"): self.parser.add_option('-f', '--force', dest='force', action='store_true', default=False, help='Force overwriting an existing role') def parse(self): ''' create an options parser for bin/ansible ''' self.parser = CLI.base_parser( usage="usage: %%prog [%s] [--help] [options] ..." 
% "|".join(self.VALID_ACTIONS), epilog="\nSee '%s --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0]) ) # common self.parser.add_option('-s', '--server', dest='api_server', default=C.GALAXY_SERVER, help='The API server destination') self.parser.add_option('-c', '--ignore-certs', action='store_true', dest='ignore_certs', default=C.GALAXY_IGNORE_CERTS, help='Ignore SSL certificate validation errors.') self.set_action() super(GalaxyCLI, self).parse() display.verbosity = self.options.verbosity self.galaxy = Galaxy(self.options) def run(self): super(GalaxyCLI, self).run() self.api = GalaxyAPI(self.galaxy) self.execute() def exit_without_ignore(self, rc=1): """ Exits with the specified return code unless the option --ignore-errors was specified """ if not self.options.ignore_errors: raise AnsibleError('- you can use --ignore-errors to skip failed roles and finish processing the list.') def _display_role_info(self, role_info): text = [u"", u"Role: %s" % to_text(role_info['name'])] text.append(u"\tdescription: %s" % role_info.get('description', '')) for k in sorted(role_info.keys()): if k in self.SKIP_INFO_KEYS: continue if isinstance(role_info[k], dict): text.append(u"\t%s:" % (k)) for key in sorted(role_info[k].keys()): if key in self.SKIP_INFO_KEYS: continue text.append(u"\t\t%s: %s" % (key, role_info[k][key])) else: text.append(u"\t%s: %s" % (k, role_info[k])) return u'\n'.join(text) ############################ # execute actions ############################ def execute_init(self): """ creates the skeleton framework of a role that complies with the galaxy metadata format. """ init_path = self.options.init_path force = self.options.force role_skeleton = self.options.role_skeleton role_name = self.args.pop(0).strip() if self.args else None if not role_name: raise AnsibleOptionsError("- no role name specified for init") role_path = os.path.join(init_path, role_name) if os.path.exists(role_path): if os.path.isfile(role_path): raise AnsibleError("- the path %s already exists, but is a file - aborting" % role_path) elif not force: raise AnsibleError("- the directory %s already exists." "you can use --force to re-initialize this directory,\n" "however it will reset any main.yml files that may have\n" "been modified there already." 
                                   % role_path)

        inject_data = dict(
            role_name=role_name,
            author='your name',
            description='your description',
            company='your company (optional)',
            license='license (GPLv2, CC-BY, etc)',
            issue_tracker_url='http://example.com/issue/tracker',
            min_ansible_version='1.2',
            container_enabled=self.options.container_enabled
        )

        # create role directory
        if not os.path.exists(role_path):
            os.makedirs(role_path)

        if role_skeleton is not None:
            skeleton_ignore_expressions = C.GALAXY_ROLE_SKELETON_IGNORE
        else:
            role_skeleton = self.galaxy.default_role_skeleton_path
            skeleton_ignore_expressions = ['^.*/.git_keep$']

        role_skeleton = os.path.expanduser(role_skeleton)
        skeleton_ignore_re = [re.compile(x) for x in skeleton_ignore_expressions]

        template_env = Environment(loader=FileSystemLoader(role_skeleton))

        for root, dirs, files in os.walk(role_skeleton, topdown=True):
            rel_root = os.path.relpath(root, role_skeleton)
            in_templates_dir = rel_root.split(os.sep, 1)[0] == 'templates'
            dirs[:] = [d for d in dirs if not any(r.match(d) for r in skeleton_ignore_re)]

            for f in files:
                filename, ext = os.path.splitext(f)
                if any(r.match(os.path.join(rel_root, f)) for r in skeleton_ignore_re):
                    continue
                elif ext == ".j2" and not in_templates_dir:
                    src_template = os.path.join(rel_root, f)
                    dest_file = os.path.join(role_path, rel_root, filename)
                    template_env.get_template(src_template).stream(inject_data).dump(dest_file)
                else:
                    f_rel_path = os.path.relpath(os.path.join(root, f), role_skeleton)
                    shutil.copyfile(os.path.join(root, f), os.path.join(role_path, f_rel_path))

            for d in dirs:
                dir_path = os.path.join(role_path, rel_root, d)
                if not os.path.exists(dir_path):
                    os.makedirs(dir_path)

        display.display("- %s was created successfully" % role_name)

    def execute_info(self):
        """
        prints out detailed information about an installed role as well as info available from the galaxy API.
        """

        if len(self.args) == 0:
            # the user needs to specify a role
            raise AnsibleOptionsError("- you must specify a user/role name")

        roles_path = self.options.roles_path

        data = ''
        for role in self.args:
            role_info = {'path': roles_path}
            gr = GalaxyRole(self.galaxy, role)

            install_info = gr.install_info
            if install_info:
                if 'version' in install_info:
                    install_info['installed_version'] = install_info['version']
                    del install_info['version']
                role_info.update(install_info)

            remote_data = False
            if not self.options.offline:
                remote_data = self.api.lookup_role_by_name(role, False)

            if remote_data:
                role_info.update(remote_data)

            if gr.metadata:
                role_info.update(gr.metadata)

            req = RoleRequirement()
            role_spec = req.role_yaml_parse({'role': role})
            if role_spec:
                role_info.update(role_spec)

            data = self._display_role_info(role_info)
            # FIXME: This is broken in both 1.9 and 2.0 as
            # _display_role_info() always returns something
            if not data:
                data = u"\n- the role %s was not found" % role

        self.pager(data)

    def execute_install(self):
        """
        uses the args list of roles to be installed, unless -f was specified. The list of roles
        can be a name (which will be downloaded via the galaxy API and github), or it can be a local .tar.gz file.
""" role_file = self.options.role_file if len(self.args) == 0 and role_file is None: # the user needs to specify one of either --role-file or specify a single user/role name raise AnsibleOptionsError("- you must specify a user/role name or a roles file") no_deps = self.options.no_deps force = self.options.force roles_left = [] if role_file: try: f = open(role_file, 'r') if role_file.endswith('.yaml') or role_file.endswith('.yml'): try: required_roles = yaml.safe_load(f.read()) except Exception as e: raise AnsibleError("Unable to load data from the requirements file: %s" % role_file) if required_roles is None: raise AnsibleError("No roles found in file: %s" % role_file) for role in required_roles: if "include" not in role: role = RoleRequirement.role_yaml_parse(role) display.vvv("found role %s in yaml file" % str(role)) if "name" not in role and "scm" not in role: raise AnsibleError("Must specify name or src for role") roles_left.append(GalaxyRole(self.galaxy, **role)) else: with open(role["include"]) as f_include: try: roles_left += [ GalaxyRole(self.galaxy, **r) for r in (RoleRequirement.role_yaml_parse(i) for i in yaml.safe_load(f_include)) ] except Exception as e: msg = "Unable to load data from the include requirements file: %s %s" raise AnsibleError(msg % (role_file, e)) else: display.deprecated("going forward only the yaml format will be supported", version="2.6") # roles listed in a file, one per line for rline in f.readlines(): if rline.startswith("#") or rline.strip() == '': continue display.debug('found role %s in text file' % str(rline)) role = RoleRequirement.role_yaml_parse(rline.strip()) roles_left.append(GalaxyRole(self.galaxy, **role)) f.close() except (IOError, OSError) as e: raise AnsibleError('Unable to open %s: %s' % (role_file, str(e))) else: # roles were specified directly, so we'll just go out grab them # (and their dependencies, unless the user doesn't want us to). for rname in self.args: role = RoleRequirement.role_yaml_parse(rname.strip()) roles_left.append(GalaxyRole(self.galaxy, **role)) for role in roles_left: # only process roles in roles files when names matches if given if role_file and self.args and role.name not in self.args: display.vvv('Skipping role %s' % role.name) continue display.vvv('Processing role %s ' % role.name) # query the galaxy API for the role data if role.install_info is not None: if role.install_info['version'] != role.version or force: if force: display.display('- changing role %s from %s to %s' % (role.name, role.install_info['version'], role.version or "unspecified")) role.remove() else: display.warning('- %s (%s) is already installed - use --force to change version to %s' % (role.name, role.install_info['version'], role.version or "unspecified")) continue else: if not force: display.display('- %s is already installed, skipping.' % str(role)) continue try: installed = role.install() except AnsibleError as e: display.warning("- %s was NOT installed successfully: %s " % (role.name, str(e))) self.exit_without_ignore() continue # install dependencies, if we want them if not no_deps and installed: if not role.metadata: display.warning("Meta file %s is empty. Skipping dependencies." % role.path) else: role_dependencies = role.metadata.get('dependencies') or [] for dep in role_dependencies: display.debug('Installing dep %s' % dep) dep_req = RoleRequirement() dep_info = dep_req.role_yaml_parse(dep) dep_role = GalaxyRole(self.galaxy, **dep_info) if '.' not in dep_role.name and '.' 
not in dep_role.src and dep_role.scm is None: # we know we can skip this, as it's not going to # be found on galaxy.ansible.com continue if dep_role.install_info is None: if dep_role not in roles_left: display.display('- adding dependency: %s' % str(dep_role)) roles_left.append(dep_role) else: display.display('- dependency %s already pending installation.' % dep_role.name) else: if dep_role.install_info['version'] != dep_role.version: display.warning('- dependency %s from role %s differs from already installed version (%s), skipping' % (str(dep_role), role.name, dep_role.install_info['version'])) else: display.display('- dependency %s is already installed, skipping.' % dep_role.name) if not installed: display.warning("- %s was NOT installed successfully." % role.name) self.exit_without_ignore() return 0 def execute_remove(self): """ removes the list of roles passed as arguments from the local system. """ if len(self.args) == 0: raise AnsibleOptionsError('- you must specify at least one role to remove.') for role_name in self.args: role = GalaxyRole(self.galaxy, role_name) try: if role.remove(): display.display('- successfully removed %s' % role_name) else: display.display('- %s is not installed, skipping.' % role_name) except Exception as e: raise AnsibleError("Failed to remove role %s: %s" % (role_name, str(e))) return 0 def execute_list(self): """ lists the roles installed on the local system or matches a single role passed as an argument. """ if len(self.args) > 1: raise AnsibleOptionsError("- please specify only one role to list, or specify no roles to see a full list") if len(self.args) == 1: # show only the request role, if it exists name = self.args.pop() gr = GalaxyRole(self.galaxy, name) if gr.metadata: install_info = gr.install_info version = None if install_info: version = install_info.get("version", None) if not version: version = "(unknown version)" # show some more info about single roles here display.display("- %s, %s" % (name, version)) else: display.display("- the role %s was not found" % name) else: # show all valid roles in the roles_path directory roles_path = self.options.roles_path for path in roles_path: role_path = os.path.expanduser(path) if not os.path.exists(role_path): raise AnsibleOptionsError("- the path %s does not exist. Please specify a valid path with --roles-path" % role_path) elif not os.path.isdir(role_path): raise AnsibleOptionsError("- %s exists, but it is not a directory. Please specify a valid path with --roles-path" % role_path) path_files = os.listdir(role_path) for path_file in path_files: gr = GalaxyRole(self.galaxy, path_file) if gr.metadata: install_info = gr.install_info version = None if install_info: version = install_info.get("version", None) if not version: version = "(unknown version)" display.display("- %s, %s" % (path_file, version)) return 0 def execute_search(self): ''' searches for roles on the Ansible Galaxy server''' page_size = 1000 search = None if len(self.args): terms = [] for i in range(len(self.args)): terms.append(self.args.pop()) search = '+'.join(terms[::-1]) if not search and not self.options.platforms and not self.options.galaxy_tags and not self.options.author: raise AnsibleError("Invalid query. 
At least one search term, platform, galaxy tag or author must be provided.") response = self.api.search_roles(search, platforms=self.options.platforms, tags=self.options.galaxy_tags, author=self.options.author, page_size=page_size) if response['count'] == 0: display.display("No roles match your search.", color=C.COLOR_ERROR) return True data = [u''] if response['count'] > page_size: data.append(u"Found %d roles matching your search. Showing first %s." % (response['count'], page_size)) else: data.append(u"Found %d roles matching your search:" % response['count']) max_len = [] for role in response['results']: max_len.append(len(role['username'] + '.' + role['name'])) name_len = max(max_len) format_str = u" %%-%ds %%s" % name_len data.append(u'') data.append(format_str % (u"Name", u"Description")) data.append(format_str % (u"----", u"-----------")) for role in response['results']: data.append(format_str % (u'%s.%s' % (role['username'], role['name']), role['description'])) data = u'\n'.join(data) self.pager(data) return True def execute_login(self): """ verify user's identify via Github and retrieve an auth token from Ansible Galaxy. """ # Authenticate with github and retrieve a token if self.options.token is None: if C.GALAXY_TOKEN: github_token = C.GALAXY_TOKEN else: login = GalaxyLogin(self.galaxy) github_token = login.create_github_token() else: github_token = self.options.token galaxy_response = self.api.authenticate(github_token) if self.options.token is None and C.GALAXY_TOKEN is None: # Remove the token we created login.remove_github_token() # Store the Galaxy token token = GalaxyToken() token.set(galaxy_response['token']) display.display("Successfully logged into Galaxy as %s" % galaxy_response['username']) return 0 def execute_import(self): """ used to import a role into Ansible Galaxy """ colors = { 'INFO': 'normal', 'WARNING': C.COLOR_WARN, 'ERROR': C.COLOR_ERROR, 'SUCCESS': C.COLOR_OK, 'FAILED': C.COLOR_ERROR, } if len(self.args) < 2: raise AnsibleError("Expected a github_username and github_repository. Use --help.") github_repo = to_text(self.args.pop(), errors='surrogate_or_strict') github_user = to_text(self.args.pop(), errors='surrogate_or_strict') if self.options.check_status: task = self.api.get_import_task(github_user=github_user, github_repo=github_repo) else: # Submit an import request task = self.api.create_import_task(github_user, github_repo, reference=self.options.reference, role_name=self.options.role_name) if len(task) > 1: # found multiple roles associated with github_user/github_repo display.display("WARNING: More than one Galaxy role associated with Github repo %s/%s." 
% (github_user, github_repo), color='yellow') display.display("The following Galaxy roles are being updated:" + u'\n', color=C.COLOR_CHANGED) for t in task: display.display('%s.%s' % (t['summary_fields']['role']['namespace'], t['summary_fields']['role']['name']), color=C.COLOR_CHANGED) display.display(u'\nTo properly namespace this role, remove each of the above and re-import %s/%s from scratch' % (github_user, github_repo), color=C.COLOR_CHANGED) return 0 # found a single role as expected display.display("Successfully submitted import request %d" % task[0]['id']) if not self.options.wait: display.display("Role name: %s" % task[0]['summary_fields']['role']['name']) display.display("Repo: %s/%s" % (task[0]['github_user'], task[0]['github_repo'])) if self.options.check_status or self.options.wait: # Get the status of the import msg_list = [] finished = False while not finished: task = self.api.get_import_task(task_id=task[0]['id']) for msg in task[0]['summary_fields']['task_messages']: if msg['id'] not in msg_list: display.display(msg['message_text'], color=colors[msg['message_type']]) msg_list.append(msg['id']) if task[0]['state'] in ['SUCCESS', 'FAILED']: finished = True else: time.sleep(10) return 0 def execute_setup(self): """ Setup an integration from Github or Travis for Ansible Galaxy roles""" if self.options.setup_list: # List existing integration secrets secrets = self.api.list_secrets() if len(secrets) == 0: # None found display.display("No integrations found.") return 0 display.display(u'\n' + "ID Source Repo", color=C.COLOR_OK) display.display("---------- ---------- ----------", color=C.COLOR_OK) for secret in secrets: display.display("%-10s %-10s %s/%s" % (secret['id'], secret['source'], secret['github_user'], secret['github_repo']), color=C.COLOR_OK) return 0 if self.options.remove_id: # Remove a secret self.api.remove_secret(self.options.remove_id) display.display("Secret removed. Integrations using this secret will not longer work.", color=C.COLOR_OK) return 0 if len(self.args) < 4: raise AnsibleError("Missing one or more arguments. Expecting: source github_user github_repo secret") secret = self.args.pop() github_repo = self.args.pop() github_user = self.args.pop() source = self.args.pop() resp = self.api.add_secret(source, github_user, github_repo, secret) display.display("Added integration for %s %s/%s" % (resp['source'], resp['github_user'], resp['github_repo'])) return 0 def execute_delete(self): """ Delete a role from Ansible Galaxy. """ if len(self.args) < 2: raise AnsibleError("Missing one or more arguments. Expected: github_user github_repo") github_repo = self.args.pop() github_user = self.args.pop() resp = self.api.delete_role(github_user, github_repo) if len(resp['deleted_roles']) > 1: display.display("Deleted the following roles:") display.display("ID User Name") display.display("------ --------------- ----------") for role in resp['deleted_roles']: display.display("%-8s %-15s %s" % (role.id, role.namespace, role.name)) display.display(resp['status']) return True ansible-2.5.1/lib/ansible/cli/inventory.py0000644000000000000000000003406013265756155020455 0ustar rootroot00000000000000# (c) 2017, Brian Coca # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. 
# # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . # from __future__ import (absolute_import, division, print_function) __metaclass__ = type import optparse from operator import attrgetter from ansible import constants as C from ansible.cli import CLI from ansible.errors import AnsibleError, AnsibleOptionsError from ansible.inventory.host import Host from ansible.plugins.loader import vars_loader from ansible.parsing.dataloader import DataLoader from ansible.utils.vars import combine_vars try: from __main__ import display except ImportError: from ansible.utils.display import Display display = Display() INTERNAL_VARS = frozenset(['ansible_diff_mode', 'ansible_facts', 'ansible_forks', 'ansible_inventory_sources', 'ansible_limit', 'ansible_playbook_python', 'ansible_run_tags', 'ansible_skip_tags', 'ansible_version', 'inventory_dir', 'inventory_file', 'inventory_hostname', 'inventory_hostname_short', 'groups', 'group_names', 'omit', 'playbook_dir', ]) class InventoryCLI(CLI): ''' used to display or dump the configured inventory as Ansible sees it ''' ARGUMENTS = {'host': 'The name of a host to match in the inventory, relevant when using --list', 'group': 'The name of a group in the inventory, relevant when using --graph', } def __init__(self, args): super(InventoryCLI, self).__init__(args) self.vm = None self.loader = None self.inventory = None self._new_api = True def parse(self): self.parser = CLI.base_parser( usage='usage: %prog [options] [host|group]', epilog='Show Ansible inventory information, by default it uses the inventory script JSON format', inventory_opts=True, vault_opts=True, basedir_opts=True, ) # remove unused default options self.parser.remove_option('--limit') self.parser.remove_option('--list-hosts') # Actions action_group = optparse.OptionGroup(self.parser, "Actions", "One of following must be used on invocation, ONLY ONE!") action_group.add_option("--list", action="store_true", default=False, dest='list', help='Output all hosts info, works as inventory script') action_group.add_option("--host", action="store", default=None, dest='host', help='Output specific host info, works as inventory script') action_group.add_option("--graph", action="store_true", default=False, dest='graph', help='create inventory graph, if supplying pattern it must be a valid group name') self.parser.add_option_group(action_group) # graph self.parser.add_option("-y", "--yaml", action="store_true", default=False, dest='yaml', help='Use YAML format instead of default JSON, ignored for --graph') self.parser.add_option("--vars", action="store_true", default=False, dest='show_vars', help='Add vars to graph display, ignored unless used with --graph') # list self.parser.add_option("--export", action="store_true", default=C.INVENTORY_EXPORT, dest='export', help="When doing an --list, represent in a way that is optimized for export," "not as an accurate representation of how Ansible has processed it") # self.parser.add_option("--ignore-vars-plugins", action="store_true", default=False, dest='ignore_vars_plugins', # help="When doing an --list, skip vars data from vars plugins, by default, this would include group_vars/ and host_vars/") super(InventoryCLI, self).parse() display.verbosity = self.options.verbosity 
        self.validate_conflicts(vault_opts=True)

        # there can be only one! and, at least, one!
        used = 0
        for opt in (self.options.list, self.options.host, self.options.graph):
            if opt:
                used += 1
        if used == 0:
            raise AnsibleOptionsError("No action selected, at least one of --host, --graph or --list needs to be specified.")
        elif used > 1:
            raise AnsibleOptionsError("Conflicting options used, only one of --host, --graph or --list can be used at the same time.")

        # set host pattern to default if not supplied
        if len(self.args) > 0:
            self.options.pattern = self.args[0]
        else:
            self.options.pattern = 'all'

    def run(self):
        results = None

        super(InventoryCLI, self).run()

        # Initialize needed objects
        if getattr(self, '_play_prereqs', False):
            self.loader, self.inventory, self.vm = self._play_prereqs(self.options)
        else:
            # fallback to pre 2.4 way of initializing
            from ansible.vars import VariableManager
            from ansible.inventory import Inventory

            self._new_api = False
            self.loader = DataLoader()
            self.vm = VariableManager()

            # use vault if needed
            if self.options.vault_password_file:
                vault_pass = CLI.read_vault_password_file(self.options.vault_password_file, loader=self.loader)
            elif self.options.ask_vault_pass:
                vault_pass = self.ask_vault_passwords()
            else:
                vault_pass = None

            if vault_pass:
                self.loader.set_vault_password(vault_pass)

            # actually get inventory and vars
            self.inventory = Inventory(loader=self.loader, variable_manager=self.vm, host_list=self.options.inventory)
            self.vm.set_inventory(self.inventory)

        if self.options.host:
            hosts = self.inventory.get_hosts(self.options.host)
            if len(hosts) != 1:
                raise AnsibleOptionsError("You must pass a single valid host to the --host parameter")
            myvars = self._get_host_variables(host=hosts[0])
            self._remove_internal(myvars)

            # FIXME: should we template first?
            results = self.dump(myvars)
        elif self.options.graph:
            results = self.inventory_graph()
        elif self.options.list:
            top = self._get_group('all')
            if self.options.yaml:
                results = self.yaml_inventory(top)
            else:
                results = self.json_inventory(top)
            results = self.dump(results)

        if results:
            # FIXME: pager?
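            # 'results' is a fully rendered string at this point; print it and
            # exit 0, otherwise fall through and exit 1 to signal that no
            # output could be produced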
display.display(results) exit(0) exit(1) def dump(self, stuff): if self.options.yaml: import yaml from ansible.parsing.yaml.dumper import AnsibleDumper results = yaml.dump(stuff, Dumper=AnsibleDumper, default_flow_style=False) else: from ansible.module_utils.basic import jsonify results = jsonify(stuff, sort_keys=True, indent=4) return results # FIXME: refactor to use same for VM def get_plugin_vars(self, path, entity): data = {} def _get_plugin_vars(plugin, path, entities): data = {} try: data = plugin.get_vars(self.loader, path, entity) except AttributeError: try: if isinstance(entity, Host): data.update(plugin.get_host_vars(entity.name)) else: data.update(plugin.get_group_vars(entity.name)) except AttributeError: if hasattr(plugin, 'run'): raise AnsibleError("Cannot use v1 type vars plugin %s from %s" % (plugin._load_name, plugin._original_path)) else: raise AnsibleError("Invalid vars plugin %s from %s" % (plugin._load_name, plugin._original_path)) return data for plugin in vars_loader.all(): data = combine_vars(data, _get_plugin_vars(plugin, path, entity)) return data def _get_group_variables(self, group): # get info from inventory source res = group.get_vars() # FIXME: add switch to skip vars plugins, add vars plugin info for inventory_dir in self.inventory._sources: res = combine_vars(res, self.get_plugin_vars(inventory_dir, group)) if group.priority != 1: res['ansible_group_priority'] = group.priority return res def _get_host_variables(self, host): if self.options.export: hostvars = host.get_vars() # FIXME: add switch to skip vars plugins # add vars plugin info for inventory_dir in self.inventory._sources: hostvars = combine_vars(hostvars, self.get_plugin_vars(inventory_dir, host)) else: if self._new_api: hostvars = self.vm.get_vars(host=host, include_hostvars=False) else: hostvars = self.vm.get_vars(self.loader, host=host, include_hostvars=False) return hostvars def _get_group(self, gname): if self._new_api: group = self.inventory.groups.get(gname) else: group = self.inventory.get_group(gname) return group def _remove_internal(self, dump): for internal in INTERNAL_VARS: if internal in dump: del dump[internal] def _remove_empty(self, dump): # remove empty keys for x in ('hosts', 'vars', 'children'): if x in dump and not dump[x]: del dump[x] def _show_vars(self, dump, depth): result = [] self._remove_internal(dump) if self.options.show_vars: for (name, val) in sorted(dump.items()): result.append(self._graph_name('{%s = %s}' % (name, val), depth)) return result def _graph_name(self, name, depth=0): if depth: name = " |" * (depth) + "--%s" % name return name def _graph_group(self, group, depth=0): result = [self._graph_name('@%s:' % group.name, depth)] depth = depth + 1 for kid in sorted(group.child_groups, key=attrgetter('name')): result.extend(self._graph_group(kid, depth)) if group.name != 'all': for host in sorted(group.hosts, key=attrgetter('name')): result.append(self._graph_name(host.name, depth)) result.extend(self._show_vars(host.get_vars(), depth + 1)) result.extend(self._show_vars(self._get_group_variables(group), depth)) return result def inventory_graph(self): start_at = self._get_group(self.options.pattern) if start_at: return '\n'.join(self._graph_group(start_at)) else: raise AnsibleOptionsError("Pattern must be valid group name when using --graph") def json_inventory(self, top): def format_group(group): results = {} results[group.name] = {} if group.name != 'all': results[group.name]['hosts'] = [h.name for h in sorted(group.hosts, key=attrgetter('name'))] 
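            # recurse depth-first through the child groups; each child merges
            # its own top-level entry into 'results', so the resulting JSON is
            # a flat mapping of group name to group data rather than a nested tree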
results[group.name]['children'] = [] for subgroup in sorted(group.child_groups, key=attrgetter('name')): results[group.name]['children'].append(subgroup.name) results.update(format_group(subgroup)) if self.options.export: results[group.name]['vars'] = self._get_group_variables(group) self._remove_empty(results[group.name]) return results results = format_group(top) # populate meta results['_meta'] = {'hostvars': {}} hosts = self.inventory.get_hosts() for host in hosts: hvars = self._get_host_variables(host) if hvars: self._remove_internal(hvars) results['_meta']['hostvars'][host.name] = hvars return results def yaml_inventory(self, top): seen = [] def format_group(group): results = {} # initialize group + vars results[group.name] = {} # subgroups results[group.name]['children'] = {} for subgroup in sorted(group.child_groups, key=attrgetter('name')): if subgroup.name != 'all': results[group.name]['children'].update(format_group(subgroup)) # hosts for group results[group.name]['hosts'] = {} if group.name != 'all': for h in sorted(group.hosts, key=attrgetter('name')): myvars = {} if h.name not in seen: # avoid defining host vars more than once seen.append(h.name) myvars = self._get_host_variables(host=h) self._remove_internal(myvars) results[group.name]['hosts'][h.name] = myvars if self.options.export: gvars = self._get_group_variables(group) if gvars: results[group.name]['vars'] = gvars self._remove_empty(results[group.name]) return results return format_group(top) ansible-2.5.1/lib/ansible/cli/playbook.py0000644000000000000000000002055113265756155020240 0ustar rootroot00000000000000# (c) 2012, Michael DeHaan # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . ######################################################## from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os import stat from ansible.cli import CLI from ansible.errors import AnsibleError, AnsibleOptionsError from ansible.executor.playbook_executor import PlaybookExecutor from ansible.playbook.block import Block from ansible.playbook.play_context import PlayContext try: from __main__ import display except ImportError: from ansible.utils.display import Display display = Display() class PlaybookCLI(CLI): ''' the tool to run *Ansible playbooks*, which are a configuration and multinode deployment system. See the project home page (https://docs.ansible.com) for more information. 
''' def parse(self): # create parser for CLI options parser = CLI.base_parser( usage="%prog [options] playbook.yml [playbook2 ...]", connect_opts=True, meta_opts=True, runas_opts=True, subset_opts=True, check_opts=True, inventory_opts=True, runtask_opts=True, vault_opts=True, fork_opts=True, module_opts=True, desc="Runs Ansible playbooks, executing the defined tasks on the targeted hosts.", ) # ansible playbook specific opts parser.add_option('--list-tasks', dest='listtasks', action='store_true', help="list all tasks that would be executed") parser.add_option('--list-tags', dest='listtags', action='store_true', help="list all available tags") parser.add_option('--step', dest='step', action='store_true', help="one-step-at-a-time: confirm each task before running") parser.add_option('--start-at-task', dest='start_at_task', help="start the playbook at the task matching this name") self.parser = parser super(PlaybookCLI, self).parse() if len(self.args) == 0: raise AnsibleOptionsError("You must specify a playbook file to run") display.verbosity = self.options.verbosity self.validate_conflicts(runas_opts=True, vault_opts=True, fork_opts=True) def run(self): super(PlaybookCLI, self).run() # Note: slightly wrong, this is written so that implicit localhost # Manage passwords sshpass = None becomepass = None passwords = {} # initial error check, to make sure all specified playbooks are accessible # before we start running anything through the playbook executor for playbook in self.args: if not os.path.exists(playbook): raise AnsibleError("the playbook: %s could not be found" % playbook) if not (os.path.isfile(playbook) or stat.S_ISFIFO(os.stat(playbook).st_mode)): raise AnsibleError("the playbook: %s does not appear to be a file" % playbook) # don't deal with privilege escalation or passwords when we don't need to if not self.options.listhosts and not self.options.listtasks and not self.options.listtags and not self.options.syntax: self.normalize_become_options() (sshpass, becomepass) = self.ask_passwords() passwords = {'conn_pass': sshpass, 'become_pass': becomepass} loader, inventory, variable_manager = self._play_prereqs(self.options) # (which is not returned in list_hosts()) is taken into account for # warning if inventory is empty. But it can't be taken into account for # checking if limit doesn't match any hosts. Instead we don't worry about # limit if only implicit localhost was in inventory to start with. 
# # Fix this when we rewrite inventory by making localhost a real host (and thus show up in list_hosts()) hosts = CLI.get_host_list(inventory, self.options.subset) # flush fact cache if requested if self.options.flush_cache: self._flush_cache(inventory, variable_manager) # create the playbook executor, which manages running the plays via a task queue manager pbex = PlaybookExecutor(playbooks=self.args, inventory=inventory, variable_manager=variable_manager, loader=loader, options=self.options, passwords=passwords) results = pbex.run() if isinstance(results, list): for p in results: display.display('\nplaybook: %s' % p['playbook']) for idx, play in enumerate(p['plays']): if play._included_path is not None: loader.set_basedir(play._included_path) else: pb_dir = os.path.realpath(os.path.dirname(p['playbook'])) loader.set_basedir(pb_dir) msg = "\n play #%d (%s): %s" % (idx + 1, ','.join(play.hosts), play.name) mytags = set(play.tags) msg += '\tTAGS: [%s]' % (','.join(mytags)) if self.options.listhosts: playhosts = set(inventory.get_hosts(play.hosts)) msg += "\n pattern: %s\n hosts (%d):" % (play.hosts, len(playhosts)) for host in playhosts: msg += "\n %s" % host display.display(msg) all_tags = set() if self.options.listtags or self.options.listtasks: taskmsg = '' if self.options.listtasks: taskmsg = ' tasks:\n' def _process_block(b): taskmsg = '' for task in b.block: if isinstance(task, Block): taskmsg += _process_block(task) else: if task.action == 'meta': continue all_tags.update(task.tags) if self.options.listtasks: cur_tags = list(mytags.union(set(task.tags))) cur_tags.sort() if task.name: taskmsg += " %s" % task.get_name() else: taskmsg += " %s" % task.action taskmsg += "\tTAGS: [%s]\n" % ', '.join(cur_tags) return taskmsg all_vars = variable_manager.get_vars(play=play) play_context = PlayContext(play=play, options=self.options) for block in play.compile(): block = block.filter_tagged_tasks(play_context, all_vars) if not block.has_tasks(): continue taskmsg += _process_block(block) if self.options.listtags: cur_tags = list(mytags.union(all_tags)) cur_tags.sort() taskmsg += " TASK TAGS: [%s]\n" % ', '.join(cur_tags) display.display(taskmsg) return 0 else: return results def _flush_cache(self, inventory, variable_manager): for host in inventory.list_hosts(): hostname = host.get_name() variable_manager.clear_facts(hostname) ansible-2.5.1/lib/ansible/cli/pull.py0000644000000000000000000003530213265756155017374 0ustar rootroot00000000000000# (c) 2012, Michael DeHaan # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
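# --- Editor's sketch (not part of the original source): PlaybookCLI.run()
# above lists tasks by recursing through nested Blocks in _process_block(),
# since a Block's .block list may contain further Blocks as well as plain
# tasks. The same traversal shape, modelled here on plain Python lists as
# hypothetical stand-ins for ansible.playbook.block.Block and Task:

def walk_tasks(entries):
    """Yield leaf tasks from arbitrarily nested task containers."""
    for entry in entries:
        if isinstance(entry, list):  # a nested "block"
            for task in walk_tasks(entry):
                yield task
        else:  # a leaf "task"
            yield entry

# two top-level tasks wrapping a nested block of two more
assert list(walk_tasks(['ping', ['setup', 'copy'], 'debug'])) == ['ping', 'setup', 'copy', 'debug']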
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

########################################################

import datetime
import os
import platform
import random
import shutil
import socket
import sys
import time

from ansible.cli import CLI
from ansible.errors import AnsibleOptionsError
from ansible.module_utils._text import to_native, to_text
from ansible.plugins.loader import module_loader
from ansible.utils.cmd_functions import run_cmd

try:
    from __main__ import display
except ImportError:
    from ansible.utils.display import Display
    display = Display()

########################################################


class PullCLI(CLI):
    ''' is used to set up a remote copy of ansible on each managed node,
        each set to run via cron and update playbook source via a source repository.
        This inverts the default *push* architecture of ansible into a *pull* architecture,
        which has near-limitless scaling potential.

        The setup playbook can be tuned to change the cron frequency, logging locations,
        and parameters to ansible-pull. This is useful both for extreme scale-out as well
        as periodic remediation. Usage of the 'fetch' module to retrieve logs from
        ansible-pull runs would be an excellent way to gather and analyze remote logs
        from ansible-pull.
    '''

    DEFAULT_REPO_TYPE = 'git'
    DEFAULT_PLAYBOOK = 'local.yml'
    REPO_CHOICES = ('git', 'subversion', 'hg', 'bzr')
    PLAYBOOK_ERRORS = {
        1: 'File does not exist',
        2: 'File is not readable',
    }
    SUPPORTED_REPO_MODULES = ['git']
    ARGUMENTS = {'playbook.yml': 'The name of one of the YAML format files to run as an Ansible playbook. '
                                 'This can be a relative path within the checkout. By default, Ansible will '
                                 "look for a playbook based on the host's fully-qualified domain name, "
                                 'then on the hostname, and finally a playbook named *local.yml*.', }
    SKIP_INVENTORY_DEFAULTS = True

    def _get_inv_cli(self):
        inv_opts = ''
        if getattr(self.options, 'inventory'):
            for inv in self.options.inventory:
                if isinstance(inv, list):
                    inv_opts += " -i '%s' " % ','.join(inv)
                elif ',' in inv or os.path.exists(inv):
                    inv_opts += ' -i %s ' % inv
        return inv_opts

    def parse(self):
        ''' create an options parser for bin/ansible-pull '''
        self.parser = CLI.base_parser(
            usage='%prog -U <repository> [options] [<playbook.yml>]',
            connect_opts=True,
            vault_opts=True,
            runtask_opts=True,
            subset_opts=True,
            inventory_opts=True,
            module_opts=True,
            runas_prompt_opts=True,
            desc="pulls playbooks from a VCS repo and executes them for the local host",
        )

        # options unique to pull
        self.parser.add_option('--purge', default=False, action='store_true', help='purge checkout after playbook run')
        self.parser.add_option('-o', '--only-if-changed', dest='ifchanged', default=False, action='store_true',
                               help='only run the playbook if the repository has been updated')
        self.parser.add_option('-s', '--sleep', dest='sleep', default=None,
                               help='sleep for random interval (between 0 and n number of seconds) before starting. 
' 'This is a useful way to disperse git requests') self.parser.add_option('-f', '--force', dest='force', default=False, action='store_true', help='run the playbook even if the repository could not be updated') self.parser.add_option('-d', '--directory', dest='dest', default=None, help='directory to checkout repository to') self.parser.add_option('-U', '--url', dest='url', default=None, help='URL of the playbook repository') self.parser.add_option('--full', dest='fullclone', action='store_true', help='Do a full clone, instead of a shallow one.') self.parser.add_option('-C', '--checkout', dest='checkout', help='branch/tag/commit to checkout. Defaults to behavior of repository module.') self.parser.add_option('--accept-host-key', default=False, dest='accept_host_key', action='store_true', help='adds the hostkey for the repo url if not already added') self.parser.add_option('-m', '--module-name', dest='module_name', default=self.DEFAULT_REPO_TYPE, help='Repository module name, which ansible will use to check out the repo. Choices are %s. Default is %s.' % (self.REPO_CHOICES, self.DEFAULT_REPO_TYPE)) self.parser.add_option('--verify-commit', dest='verify', default=False, action='store_true', help='verify GPG signature of checked out commit, if it fails abort running the playbook. ' 'This needs the corresponding VCS module to support such an operation') self.parser.add_option('--clean', dest='clean', default=False, action='store_true', help='modified files in the working repository will be discarded') self.parser.add_option('--track-subs', dest='tracksubs', default=False, action='store_true', help='submodules will track the latest changes. This is equivalent to specifying the --remote flag to git submodule update') self.parser.add_option("--check", default=False, dest='check', action='store_true', help="don't make any changes; instead, try to predict some of the changes that may occur") super(PullCLI, self).parse() if not self.options.dest: hostname = socket.getfqdn() # use a hostname dependent directory, in case of $HOME on nfs self.options.dest = os.path.join('~/.ansible/pull', hostname) self.options.dest = os.path.expandvars(os.path.expanduser(self.options.dest)) if os.path.exists(self.options.dest) and not os.path.isdir(self.options.dest): raise AnsibleOptionsError("%s is not a valid or accessible directory." % self.options.dest) if self.options.sleep: try: secs = random.randint(0, int(self.options.sleep)) self.options.sleep = secs except ValueError: raise AnsibleOptionsError("%s is not a number." 
% self.options.sleep) if not self.options.url: raise AnsibleOptionsError("URL for repository not specified, use -h for help") if self.options.module_name not in self.SUPPORTED_REPO_MODULES: raise AnsibleOptionsError("Unsupported repo module %s, choices are %s" % (self.options.module_name, ','.join(self.SUPPORTED_REPO_MODULES))) display.verbosity = self.options.verbosity self.validate_conflicts(vault_opts=True) def run(self): ''' use Runner lib to do SSH things ''' super(PullCLI, self).run() # log command line now = datetime.datetime.now() display.display(now.strftime("Starting Ansible Pull at %F %T")) display.display(' '.join(sys.argv)) # Build Checkout command # Now construct the ansible command node = platform.node() host = socket.getfqdn() limit_opts = 'localhost,%s,127.0.0.1' % ','.join(set([host, node, host.split('.')[0], node.split('.')[0]])) base_opts = '-c local ' if self.options.verbosity > 0: base_opts += ' -%s' % ''.join(["v" for x in range(0, self.options.verbosity)]) # Attempt to use the inventory passed in as an argument # It might not yet have been downloaded so use localhost as default inv_opts = self._get_inv_cli() if not inv_opts: inv_opts = " -i localhost, " # SCM specific options if self.options.module_name == 'git': repo_opts = "name=%s dest=%s" % (self.options.url, self.options.dest) if self.options.checkout: repo_opts += ' version=%s' % self.options.checkout if self.options.accept_host_key: repo_opts += ' accept_hostkey=yes' if self.options.private_key_file: repo_opts += ' key_file=%s' % self.options.private_key_file if self.options.verify: repo_opts += ' verify_commit=yes' if self.options.tracksubs: repo_opts += ' track_submodules=yes' if not self.options.fullclone: repo_opts += ' depth=1' elif self.options.module_name == 'subversion': repo_opts = "repo=%s dest=%s" % (self.options.url, self.options.dest) if self.options.checkout: repo_opts += ' revision=%s' % self.options.checkout if not self.options.fullclone: repo_opts += ' export=yes' elif self.options.module_name == 'hg': repo_opts = "repo=%s dest=%s" % (self.options.url, self.options.dest) if self.options.checkout: repo_opts += ' revision=%s' % self.options.checkout elif self.options.module_name == 'bzr': repo_opts = "name=%s dest=%s" % (self.options.url, self.options.dest) if self.options.checkout: repo_opts += ' version=%s' % self.options.checkout else: raise AnsibleOptionsError('Unsupported (%s) SCM module for pull, choices are: %s' % (self.options.module_name, ','.join(self.REPO_CHOICES))) # options common to all supported SCMS if self.options.clean: repo_opts += ' force=yes' path = module_loader.find_plugin(self.options.module_name) if path is None: raise AnsibleOptionsError(("module '%s' not found.\n" % self.options.module_name)) bin_path = os.path.dirname(os.path.abspath(sys.argv[0])) # hardcode local and inventory/host as this is just meant to fetch the repo cmd = '%s/ansible %s %s -m %s -a "%s" all -l "%s"' % (bin_path, inv_opts, base_opts, self.options.module_name, repo_opts, limit_opts) for ev in self.options.extra_vars: cmd += ' -e "%s"' % ev # Nap? if self.options.sleep: display.display("Sleeping for %d seconds..." % self.options.sleep) time.sleep(self.options.sleep) # RUN the Checkout command display.debug("running ansible with VCS module to checkout repo") display.vvvv('EXEC: %s' % cmd) rc, b_out, b_err = run_cmd(cmd, live=True) if rc != 0: if self.options.force: display.warning("Unable to update repository. 
Continuing with (forced) run of playbook.") else: return rc elif self.options.ifchanged and b'"changed": true' not in b_out: display.display("Repository has not changed, quitting.") return 0 playbook = self.select_playbook(self.options.dest) if playbook is None: raise AnsibleOptionsError("Could not find a playbook to run.") # Build playbook command cmd = '%s/ansible-playbook %s %s' % (bin_path, base_opts, playbook) if self.options.vault_password_files: for vault_password_file in self.options.vault_password_files: cmd += " --vault-password-file=%s" % vault_password_file if self.options.vault_ids: for vault_id in self.options.vault_ids: cmd += " --vault-id=%s" % vault_id for ev in self.options.extra_vars: cmd += ' -e "%s"' % ev if self.options.ask_sudo_pass or self.options.ask_su_pass or self.options.become_ask_pass: cmd += ' --ask-become-pass' if self.options.skip_tags: cmd += ' --skip-tags "%s"' % to_native(u','.join(self.options.skip_tags)) if self.options.tags: cmd += ' -t "%s"' % to_native(u','.join(self.options.tags)) if self.options.subset: cmd += ' -l "%s"' % self.options.subset else: cmd += ' -l "%s"' % limit_opts if self.options.check: cmd += ' -C' os.chdir(self.options.dest) # redo inventory options as new files might exist now inv_opts = self._get_inv_cli() if inv_opts: cmd += inv_opts # RUN THE PLAYBOOK COMMAND display.debug("running ansible-playbook to do actual work") display.debug('EXEC: %s' % cmd) rc, b_out, b_err = run_cmd(cmd, live=True) if self.options.purge: os.chdir('/') try: shutil.rmtree(self.options.dest) except Exception as e: display.error(u"Failed to remove %s: %s" % (self.options.dest, to_text(e))) return rc def try_playbook(self, path): if not os.path.exists(path): return 1 if not os.access(path, os.R_OK): return 2 return 0 def select_playbook(self, path): playbook = None if len(self.args) > 0 and self.args[0] is not None: playbook = os.path.join(path, self.args[0]) rc = self.try_playbook(playbook) if rc != 0: display.warning("%s: %s" % (playbook, self.PLAYBOOK_ERRORS[rc])) return None return playbook else: fqdn = socket.getfqdn() hostpb = os.path.join(path, fqdn + '.yml') shorthostpb = os.path.join(path, fqdn.split('.')[0] + '.yml') localpb = os.path.join(path, self.DEFAULT_PLAYBOOK) errors = [] for pb in [hostpb, shorthostpb, localpb]: rc = self.try_playbook(pb) if rc == 0: playbook = pb break else: errors.append("%s: %s" % (pb, self.PLAYBOOK_ERRORS[rc])) if playbook is None: display.warning("\n".join(errors)) return playbook ansible-2.5.1/lib/ansible/cli/vault.py0000644000000000000000000005143013265756155017553 0ustar rootroot00000000000000# (c) 2014, James Tanner # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . # # ansible-vault is a script that encrypts/decrypts YAML files. See # http://docs.ansible.com/playbooks_vault.html for more details. 
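# A condensed sketch of the playbook selection order implemented by
# select_playbook() above: an explicit argument wins; otherwise ansible-pull
# falls back from `<fqdn>.yml` to `<shorthostname>.yml` to `local.yml`,
# returning the first candidate that exists and is readable. The helper name
# is illustrative, and only the hostname-based fallback chain is modeled.
import os
import socket

def pick_playbook(checkout_dir, default='local.yml'):
    fqdn = socket.getfqdn()
    candidates = [fqdn + '.yml', fqdn.split('.')[0] + '.yml', default]
    for name in candidates:
        path = os.path.join(checkout_dir, name)
        if os.path.exists(path) and os.access(path, os.R_OK):
            return path
    return None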
from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os import sys from ansible.cli import CLI from ansible import constants as C from ansible.errors import AnsibleOptionsError from ansible.module_utils._text import to_text, to_bytes from ansible.parsing.dataloader import DataLoader from ansible.parsing.vault import VaultEditor, VaultLib, match_encrypt_secret try: from __main__ import display except ImportError: from ansible.utils.display import Display display = Display() class VaultCLI(CLI): ''' can encrypt any structured data file used by Ansible. This can include *group_vars/* or *host_vars/* inventory variables, variables loaded by *include_vars* or *vars_files*, or variable files passed on the ansible-playbook command line with *-e @file.yml* or *-e @file.json*. Role variables and defaults are also included! Because Ansible tasks, handlers, and other objects are data, these can also be encrypted with vault. If you'd like to not expose what variables you are using, you can keep an individual task file entirely encrypted. The password used with vault currently must be the same for all files you wish to use together at the same time. ''' VALID_ACTIONS = ("create", "decrypt", "edit", "encrypt", "encrypt_string", "rekey", "view") FROM_STDIN = "stdin" FROM_ARGS = "the command line args" FROM_PROMPT = "the interactive prompt" def __init__(self, args): self.b_vault_pass = None self.b_new_vault_pass = None self.encrypt_string_read_stdin = False self.encrypt_secret = None self.encrypt_vault_id = None self.new_encrypt_secret = None self.new_encrypt_vault_id = None self.can_output = ['encrypt', 'decrypt', 'encrypt_string'] super(VaultCLI, self).__init__(args) def set_action(self): super(VaultCLI, self).set_action() # add output if needed if self.action in self.can_output: self.parser.add_option('--output', default=None, dest='output_file', help='output file name for encrypt or decrypt; use - for stdout', action="callback", callback=CLI.unfrack_path, type='string') # options specific to self.actions if self.action == "create": self.parser.set_usage("usage: %prog create [options] file_name") elif self.action == "decrypt": self.parser.set_usage("usage: %prog decrypt [options] file_name") elif self.action == "edit": self.parser.set_usage("usage: %prog edit [options] file_name") elif self.action == "view": self.parser.set_usage("usage: %prog view [options] file_name") elif self.action == "encrypt": self.parser.set_usage("usage: %prog encrypt [options] file_name") # I have no prefence for either dash or underscore elif self.action == "encrypt_string": self.parser.add_option('-p', '--prompt', dest='encrypt_string_prompt', action='store_true', help="Prompt for the string to encrypt") self.parser.add_option('-n', '--name', dest='encrypt_string_names', action='append', help="Specify the variable name") self.parser.add_option('--stdin-name', dest='encrypt_string_stdin_name', default=None, help="Specify the variable name for stdin") self.parser.set_usage("usage: %prog encrypt_string [--prompt] [options] string_to_encrypt") elif self.action == "rekey": self.parser.set_usage("usage: %prog rekey [options] file_name") # For encrypting actions, we can also specify which of multiple vault ids should be used for encrypting if self.action in ['create', 'encrypt', 'encrypt_string', 'rekey', 'edit']: self.parser.add_option('--encrypt-vault-id', default=[], dest='encrypt_vault_id', action='store', type='string', help='the vault id used to encrypt (required if more than 
vault-id is provided)') def parse(self): self.parser = CLI.base_parser( vault_opts=True, vault_rekey_opts=True, usage="usage: %%prog [%s] [options] [vaultfile.yml]" % "|".join(self.VALID_ACTIONS), desc="encryption/decryption utility for Ansible data files", epilog="\nSee '%s --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0]) ) self.set_action() super(VaultCLI, self).parse() self.validate_conflicts(vault_opts=True, vault_rekey_opts=True) display.verbosity = self.options.verbosity if self.options.vault_ids: for vault_id in self.options.vault_ids: if u';' in vault_id: raise AnsibleOptionsError("'%s' is not a valid vault id. The character ';' is not allowed in vault ids" % vault_id) if self.action not in self.can_output: if len(self.args) == 0: raise AnsibleOptionsError("Vault requires at least one filename as a parameter") else: # This restriction should remain in place until it's possible to # load multiple YAML records from a single file, or it's too easy # to create an encrypted file that can't be read back in. But in # the meanwhile, "cat a b c|ansible-vault encrypt --output x" is # a workaround. if self.options.output_file and len(self.args) > 1: raise AnsibleOptionsError("At most one input file may be used with the --output option") if self.action == 'encrypt_string': if '-' in self.args or len(self.args) == 0 or self.options.encrypt_string_stdin_name: self.encrypt_string_read_stdin = True # TODO: prompting from stdin and reading from stdin seem mutually exclusive, but verify that. if self.options.encrypt_string_prompt and self.encrypt_string_read_stdin: raise AnsibleOptionsError('The --prompt option is not supported if also reading input from stdin') def run(self): super(VaultCLI, self).run() loader = DataLoader() # set default restrictive umask old_umask = os.umask(0o077) vault_ids = self.options.vault_ids # there are 3 types of actions, those that just 'read' (decrypt, view) and only # need to ask for a password once, and those that 'write' (create, encrypt) that # ask for a new password and confirm it, and 'read/write (rekey) that asks for the # old password, then asks for a new one and confirms it. default_vault_ids = C.DEFAULT_VAULT_IDENTITY_LIST vault_ids = default_vault_ids + vault_ids # TODO: instead of prompting for these before, we could let VaultEditor # call a callback when it needs it. if self.action in ['decrypt', 'view', 'rekey', 'edit']: vault_secrets = self.setup_vault_secrets(loader, vault_ids=vault_ids, vault_password_files=self.options.vault_password_files, ask_vault_pass=self.options.ask_vault_pass) if not vault_secrets: raise AnsibleOptionsError("A vault password is required to use Ansible's Vault") if self.action in ['encrypt', 'encrypt_string', 'create']: encrypt_vault_id = None # no --encrypt-vault-id self.options.encrypt_vault_id for 'edit' if self.action not in ['edit']: encrypt_vault_id = self.options.encrypt_vault_id or C.DEFAULT_VAULT_ENCRYPT_IDENTITY vault_secrets = None vault_secrets = \ self.setup_vault_secrets(loader, vault_ids=vault_ids, vault_password_files=self.options.vault_password_files, ask_vault_pass=self.options.ask_vault_pass, create_new_password=True) if len(vault_secrets) > 1 and not encrypt_vault_id: raise AnsibleOptionsError("The vault-ids %s are available to encrypt. 
Specify the vault-id to encrypt with --encrypt-vault-id" % ','.join([x[0] for x in vault_secrets])) if not vault_secrets: raise AnsibleOptionsError("A vault password is required to use Ansible's Vault") encrypt_secret = match_encrypt_secret(vault_secrets, encrypt_vault_id=encrypt_vault_id) # only one secret for encrypt for now, use the first vault_id and use its first secret # TODO: exception if more than one? self.encrypt_vault_id = encrypt_secret[0] self.encrypt_secret = encrypt_secret[1] if self.action in ['rekey']: encrypt_vault_id = self.options.encrypt_vault_id or C.DEFAULT_VAULT_ENCRYPT_IDENTITY # print('encrypt_vault_id: %s' % encrypt_vault_id) # print('default_encrypt_vault_id: %s' % default_encrypt_vault_id) # new_vault_ids should only ever be one item, from # load the default vault ids if we are using encrypt-vault-id new_vault_ids = [] if encrypt_vault_id: new_vault_ids = default_vault_ids if self.options.new_vault_id: new_vault_ids.append(self.options.new_vault_id) new_vault_password_files = [] if self.options.new_vault_password_file: new_vault_password_files.append(self.options.new_vault_password_file) new_vault_secrets = \ self.setup_vault_secrets(loader, vault_ids=new_vault_ids, vault_password_files=new_vault_password_files, ask_vault_pass=self.options.ask_vault_pass, create_new_password=True) if not new_vault_secrets: raise AnsibleOptionsError("A new vault password is required to use Ansible's Vault rekey") # There is only one new_vault_id currently and one new_vault_secret, or we # use the id specified in --encrypt-vault-id new_encrypt_secret = match_encrypt_secret(new_vault_secrets, encrypt_vault_id=encrypt_vault_id) self.new_encrypt_vault_id = new_encrypt_secret[0] self.new_encrypt_secret = new_encrypt_secret[1] loader.set_vault_secrets(vault_secrets) # FIXME: do we need to create VaultEditor here? its not reused vault = VaultLib(vault_secrets) self.editor = VaultEditor(vault) self.execute() # and restore umask os.umask(old_umask) def execute_encrypt(self): ''' encrypt the supplied file using the provided vault secret ''' if len(self.args) == 0 and sys.stdin.isatty(): display.display("Reading plaintext input from stdin", stderr=True) for f in self.args or ['-']: # Fixme: use the correct vau self.editor.encrypt_file(f, self.encrypt_secret, vault_id=self.encrypt_vault_id, output_file=self.options.output_file) if sys.stdout.isatty(): display.display("Encryption successful", stderr=True) def format_ciphertext_yaml(self, b_ciphertext, indent=None, name=None): indent = indent or 10 block_format_var_name = "" if name: block_format_var_name = "%s: " % name block_format_header = "%s!vault |" % block_format_var_name lines = [] vault_ciphertext = to_text(b_ciphertext) lines.append(block_format_header) for line in vault_ciphertext.splitlines(): lines.append('%s%s' % (' ' * indent, line)) yaml_ciphertext = '\n'.join(lines) return yaml_ciphertext def execute_encrypt_string(self): ''' encrypt the supplied string using the provided vault secret ''' b_plaintext = None # Holds tuples (the_text, the_source_of_the_string, the variable name if its provided). b_plaintext_list = [] # remove the non-option '-' arg (used to indicate 'read from stdin') from the candidate args so # we don't add it to the plaintext list args = [x for x in self.args if x != '-'] # We can prompt and read input, or read from stdin, but not both. 
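# A standalone sketch of the YAML formatting done by format_ciphertext_yaml()
# above: vault ciphertext is emitted as a `!vault |` block scalar with every
# ciphertext line indented, so the result can be pasted directly under a
# variable in a playbook. The function name and the sample payload below are
# illustrative only (real ciphertext follows the $ANSIBLE_VAULT header).
def to_vault_yaml(ciphertext, name=None, indent=10):
    header = ('%s: ' % name if name else '') + '!vault |'
    lines = [header]
    for line in ciphertext.splitlines():
        lines.append(' ' * indent + line)
    return '\n'.join(lines)

fake_ct = '$ANSIBLE_VAULT;1.1;AES256\n61616161626262\n63636363646464'  # fake payload
print(to_vault_yaml(fake_ct, name='db_password'))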
if self.options.encrypt_string_prompt: msg = "String to encrypt: " name = None name_prompt_response = display.prompt('Variable name (enter for no name): ') # TODO: enforce var naming rules? if name_prompt_response != "": name = name_prompt_response # TODO: could prompt for which vault_id to use for each plaintext string # currently, it will just be the default # could use private=True for shadowed input if useful prompt_response = display.prompt(msg) if prompt_response == '': raise AnsibleOptionsError('The plaintext provided from the prompt was empty, not encrypting') b_plaintext = to_bytes(prompt_response) b_plaintext_list.append((b_plaintext, self.FROM_PROMPT, name)) # read from stdin if self.encrypt_string_read_stdin: if sys.stdout.isatty(): display.display("Reading plaintext input from stdin. (ctrl-d to end input)", stderr=True) stdin_text = sys.stdin.read() if stdin_text == '': raise AnsibleOptionsError('stdin was empty, not encrypting') b_plaintext = to_bytes(stdin_text) # defaults to None name = self.options.encrypt_string_stdin_name b_plaintext_list.append((b_plaintext, self.FROM_STDIN, name)) # use any leftover args as strings to encrypt # Try to match args up to --name options if hasattr(self.options, 'encrypt_string_names') and self.options.encrypt_string_names: name_and_text_list = list(zip(self.options.encrypt_string_names, args)) # Some but not enough --name's to name each var if len(args) > len(name_and_text_list): # Trying to avoid ever showing the plaintext in the output, so this warning is vague to avoid that. display.display('The number of --name options do not match the number of args.', stderr=True) display.display('The last named variable will be "%s". The rest will not have names.' % self.options.encrypt_string_names[-1], stderr=True) # Add the rest of the args without specifying a name for extra_arg in args[len(name_and_text_list):]: name_and_text_list.append((None, extra_arg)) # if no --names are provided, just use the args without a name. else: name_and_text_list = [(None, x) for x in args] # Convert the plaintext text objects to bytestrings and collect for name_and_text in name_and_text_list: name, plaintext = name_and_text if plaintext == '': raise AnsibleOptionsError('The plaintext provided from the command line args was empty, not encrypting') b_plaintext = to_bytes(plaintext) b_plaintext_list.append((b_plaintext, self.FROM_ARGS, name)) # TODO: specify vault_id per string? # Format the encrypted strings and any corresponding stderr output outputs = self._format_output_vault_strings(b_plaintext_list, vault_id=self.encrypt_vault_id) for output in outputs: err = output.get('err', None) out = output.get('out', '') if err: sys.stderr.write(err) print(out) if sys.stdout.isatty(): display.display("Encryption successful", stderr=True) # TODO: offer block or string ala eyaml def _format_output_vault_strings(self, b_plaintext_list, vault_id=None): # If we are only showing one item in the output, we don't need to included commented # delimiters in the text show_delimiter = False if len(b_plaintext_list) > 1: show_delimiter = True # list of dicts {'out': '', 'err': ''} output = [] # Encrypt the plaintext, and format it into a yaml block that can be pasted into a playbook. # For more than one input, show some differentiating info in the stderr output so we can tell them # apart. 
If we have a var name, we include that in the yaml for index, b_plaintext_info in enumerate(b_plaintext_list): # (the text itself, which input it came from, its name) b_plaintext, src, name = b_plaintext_info b_ciphertext = self.editor.encrypt_bytes(b_plaintext, self.encrypt_secret, vault_id=vault_id) # block formatting yaml_text = self.format_ciphertext_yaml(b_ciphertext, name=name) err_msg = None if show_delimiter: human_index = index + 1 if name: err_msg = '# The encrypted version of variable ("%s", the string #%d from %s).\n' % (name, human_index, src) else: err_msg = '# The encrypted version of the string #%d from %s.)\n' % (human_index, src) output.append({'out': yaml_text, 'err': err_msg}) return output def execute_decrypt(self): ''' decrypt the supplied file using the provided vault secret ''' if len(self.args) == 0 and sys.stdin.isatty(): display.display("Reading ciphertext input from stdin", stderr=True) for f in self.args or ['-']: self.editor.decrypt_file(f, output_file=self.options.output_file) if sys.stdout.isatty(): display.display("Decryption successful", stderr=True) def execute_create(self): ''' create and open a file in an editor that will be encryped with the provided vault secret when closed''' if len(self.args) > 1: raise AnsibleOptionsError("ansible-vault create can take only one filename argument") self.editor.create_file(self.args[0], self.encrypt_secret, vault_id=self.encrypt_vault_id) def execute_edit(self): ''' open and decrypt an existing vaulted file in an editor, that will be encryped again when closed''' for f in self.args: self.editor.edit_file(f) def execute_view(self): ''' open, decrypt and view an existing vaulted file using a pager using the supplied vault secret ''' for f in self.args: # Note: vault should return byte strings because it could encrypt # and decrypt binary files. We are responsible for changing it to # unicode here because we are displaying it and therefore can make # the decision that the display doesn't have to be precisely what # the input was (leave that to decrypt instead) plaintext = self.editor.plaintext(f) self.pager(to_text(plaintext)) def execute_rekey(self): ''' re-encrypt a vaulted file with a new secret, the previous secret is required ''' for f in self.args: # FIXME: plumb in vault_id, use the default new_vault_secret for now self.editor.rekey_file(f, self.new_encrypt_secret, self.new_encrypt_vault_id) display.display("Rekey successful", stderr=True) ansible-2.5.1/lib/ansible/compat/0000755000000000000000000000000013265756221016551 5ustar rootroot00000000000000ansible-2.5.1/lib/ansible/compat/selectors/0000755000000000000000000000000013265756221020554 5ustar rootroot00000000000000ansible-2.5.1/lib/ansible/compat/selectors/__init__.py0000644000000000000000000000306613265756155022700 0ustar rootroot00000000000000# (c) 2014, 2017 Toshio Kuratomi # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
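# A condensed, runnable sketch of the import-fallback chain this compat
# module implements below: prefer the stdlib module, then the pypi backport,
# and finally a bundled copy. The final fallback is stubbed here with None;
# the real module imports its bundled _selectors2 instead.
try:
    import selectors  # Python 3.4+ stdlib
except ImportError:
    try:
        import selectors2 as selectors  # backport package installed from pypi
    except ImportError:
        selectors = None  # the real module falls back to its bundled copy here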
# Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type ''' Compat selectors library. Python-3.5 has this builtin. The selectors2 package exists on pypi to backport the functionality as far as python-2.6. ''' # The following makes it easier for us to script updates of the bundled code _BUNDLED_METADATA = {"pypi_name": "selectors2", "version": "1.1.0"} import os.path import sys try: # Python 3.4+ import selectors as _system_selectors except ImportError: try: # backport package installed in the system import selectors2 as _system_selectors except ImportError: _system_selectors = None if _system_selectors: selectors = _system_selectors else: # Our bundled copy from . import _selectors2 as selectors sys.modules['ansible.compat.selectors'] = selectors ansible-2.5.1/lib/ansible/compat/selectors/_selectors2.py0000644000000000000000000005542413265756155023372 0ustar rootroot00000000000000# This file is from the selectors2.py package. It backports the PSF Licensed # selectors module from the Python-3.5 stdlib to older versions of Python. # The author, Seth Michael Larson, dual licenses his modifications under the # PSF License and MIT License: # https://github.com/SethMichaelLarson/selectors2#license # # Copyright (c) 2016 Seth Michael Larson # # PSF License (see licenses/PSF-license.txt or https://opensource.org/licenses/Python-2.0) # MIT License (see licenses/MIT-license.txt or https://opensource.org/licenses/MIT) # # Backport of selectors.py from Python 3.5+ to support Python < 3.4 # Also has the behavior specified in PEP 475 which is to retry syscalls # in the case of an EINTR error. This module is required because selectors34 # does not follow this behavior and instead returns that no dile descriptor # events have occurred rather than retry the syscall. The decision to drop # support for select.devpoll is made to maintain 100% test coverage. import errno import math import select import socket import sys import time from collections import namedtuple, Mapping try: monotonic = time.monotonic except (AttributeError, ImportError): # Python 3.3< monotonic = time.time __author__ = 'Seth Michael Larson' __email__ = 'sethmichaellarson@protonmail.com' __version__ = '1.1.0' __license__ = 'MIT' __all__ = [ 'EVENT_READ', 'EVENT_WRITE', 'SelectorError', 'SelectorKey', 'DefaultSelector' ] EVENT_READ = (1 << 0) EVENT_WRITE = (1 << 1) HAS_SELECT = True # Variable that shows whether the platform has a selector. _SYSCALL_SENTINEL = object() # Sentinel in case a system call returns None. class SelectorError(Exception): def __init__(self, errcode): super(SelectorError, self).__init__() self.errno = errcode def __repr__(self): return "".format(self.errno) def __str__(self): return self.__repr__() def _fileobj_to_fd(fileobj): """ Return a file descriptor from a file object. If given an integer will simply return that integer back. """ if isinstance(fileobj, int): fd = fileobj else: try: fd = int(fileobj.fileno()) except (AttributeError, TypeError, ValueError): raise ValueError("Invalid file object: {0!r}".format(fileobj)) if fd < 0: raise ValueError("Invalid file descriptor: {0}".format(fd)) return fd # Python 3.5 uses a more direct route to wrap system calls to increase speed. if sys.version_info >= (3, 5): def _syscall_wrapper(func, _, *args, **kwargs): """ This is the short-circuit version of the below logic because in Python 3.5+ all selectors restart system calls. 
""" try: return func(*args, **kwargs) except (OSError, IOError, select.error) as e: errcode = None if hasattr(e, "errno"): errcode = e.errno elif hasattr(e, "args"): errcode = e.args[0] raise SelectorError(errcode) else: def _syscall_wrapper(func, recalc_timeout, *args, **kwargs): """ Wrapper function for syscalls that could fail due to EINTR. All functions should be retried if there is time left in the timeout in accordance with PEP 475. """ timeout = kwargs.get("timeout", None) if timeout is None: expires = None recalc_timeout = False else: timeout = float(timeout) if timeout < 0.0: # Timeout less than 0 treated as no timeout. expires = None else: expires = monotonic() + timeout args = list(args) if recalc_timeout and "timeout" not in kwargs: raise ValueError( "Timeout must be in args or kwargs to be recalculated") result = _SYSCALL_SENTINEL while result is _SYSCALL_SENTINEL: try: result = func(*args, **kwargs) # OSError is thrown by select.select # IOError is thrown by select.epoll.poll # select.error is thrown by select.poll.poll # Aren't we thankful for Python 3.x rework for exceptions? except (OSError, IOError, select.error) as e: # select.error wasn't a subclass of OSError in the past. errcode = None if hasattr(e, "errno"): errcode = e.errno elif hasattr(e, "args"): errcode = e.args[0] # Also test for the Windows equivalent of EINTR. is_interrupt = (errcode == errno.EINTR or (hasattr(errno, "WSAEINTR") and errcode == errno.WSAEINTR)) if is_interrupt: if expires is not None: current_time = monotonic() if current_time > expires: raise OSError(errno=errno.ETIMEDOUT) if recalc_timeout: if "timeout" in kwargs: kwargs["timeout"] = expires - current_time continue if errcode: raise SelectorError(errcode) else: raise return result SelectorKey = namedtuple('SelectorKey', ['fileobj', 'fd', 'events', 'data']) class _SelectorMapping(Mapping): """ Mapping of file objects to selector keys """ def __init__(self, selector): self._selector = selector def __len__(self): return len(self._selector._fd_to_key) def __getitem__(self, fileobj): try: fd = self._selector._fileobj_lookup(fileobj) return self._selector._fd_to_key[fd] except KeyError: raise KeyError("{0!r} is not registered.".format(fileobj)) def __iter__(self): return iter(self._selector._fd_to_key) class BaseSelector(object): """ Abstract Selector class A selector supports registering file objects to be monitored for specific I/O events. A file object is a file descriptor or any object with a `fileno()` method. An arbitrary object can be attached to the file object which can be used for example to store context info, a callback, etc. A selector can use various implementations (select(), poll(), epoll(), and kqueue()) depending on the platform. The 'DefaultSelector' class uses the most efficient implementation for the current platform. """ def __init__(self): # Maps file descriptors to keys. self._fd_to_key = {} # Read-only mapping returned by get_map() self._map = _SelectorMapping(self) def _fileobj_lookup(self, fileobj): """ Return a file descriptor from a file object. This wraps _fileobj_to_fd() to do an exhaustive search in case the object is invalid but we still have it in our map. Used by unregister() so we can unregister an object that was previously registered even if it is closed. It is also used by _SelectorMapping """ try: return _fileobj_to_fd(fileobj) except ValueError: # Search through all our mapped keys. for key in self._fd_to_key.values(): if key.fileobj is fileobj: return key.fd # Raise ValueError after all. 
raise def register(self, fileobj, events, data=None): """ Register a file object for a set of events to monitor. """ if (not events) or (events & ~(EVENT_READ | EVENT_WRITE)): raise ValueError("Invalid events: {0!r}".format(events)) key = SelectorKey(fileobj, self._fileobj_lookup(fileobj), events, data) if key.fd in self._fd_to_key: raise KeyError("{0!r} (FD {1}) is already registered" .format(fileobj, key.fd)) self._fd_to_key[key.fd] = key return key def unregister(self, fileobj): """ Unregister a file object from being monitored. """ try: key = self._fd_to_key.pop(self._fileobj_lookup(fileobj)) except KeyError: raise KeyError("{0!r} is not registered".format(fileobj)) # Getting the fileno of a closed socket on Windows errors with EBADF. except socket.error as err: if err.errno != errno.EBADF: raise else: for key in self._fd_to_key.values(): if key.fileobj is fileobj: self._fd_to_key.pop(key.fd) break else: raise KeyError("{0!r} is not registered".format(fileobj)) return key def modify(self, fileobj, events, data=None): """ Change a registered file object monitored events and data. """ # NOTE: Some subclasses optimize this operation even further. try: key = self._fd_to_key[self._fileobj_lookup(fileobj)] except KeyError: raise KeyError("{0!r} is not registered".format(fileobj)) if events != key.events: self.unregister(fileobj) key = self.register(fileobj, events, data) elif data != key.data: # Use a shortcut to update the data. key = key._replace(data=data) self._fd_to_key[key.fd] = key return key def select(self, timeout=None): """ Perform the actual selection until some monitored file objects are ready or the timeout expires. """ raise NotImplementedError() def close(self): """ Close the selector. This must be called to ensure that all underlying resources are freed. """ self._fd_to_key.clear() self._map = None def get_key(self, fileobj): """ Return the key associated with a registered file object. """ mapping = self.get_map() if mapping is None: raise RuntimeError("Selector is closed") try: return mapping[fileobj] except KeyError: raise KeyError("{0!r} is not registered".format(fileobj)) def get_map(self): """ Return a mapping of file objects to selector keys """ return self._map def _key_from_fd(self, fd): """ Return the key associated to a given file descriptor Return None if it is not found. """ try: return self._fd_to_key[fd] except KeyError: return None def __enter__(self): return self def __exit__(self, *args): self.close() # Almost all platforms have select.select() if hasattr(select, "select"): class SelectSelector(BaseSelector): """ Select-based selector. """ def __init__(self): super(SelectSelector, self).__init__() self._readers = set() self._writers = set() def register(self, fileobj, events, data=None): key = super(SelectSelector, self).register(fileobj, events, data) if events & EVENT_READ: self._readers.add(key.fd) if events & EVENT_WRITE: self._writers.add(key.fd) return key def unregister(self, fileobj): key = super(SelectSelector, self).unregister(fileobj) self._readers.discard(key.fd) self._writers.discard(key.fd) return key def _select(self, r, w, timeout=None): """ Wrapper for select.select because timeout is a positional arg """ return select.select(r, w, [], timeout) def select(self, timeout=None): # Selecting on empty lists on Windows errors out. 
if not len(self._readers) and not len(self._writers): return [] timeout = None if timeout is None else max(timeout, 0.0) ready = [] r, w, _ = _syscall_wrapper(self._select, True, self._readers, self._writers, timeout) r = set(r) w = set(w) for fd in r | w: events = 0 if fd in r: events |= EVENT_READ if fd in w: events |= EVENT_WRITE key = self._key_from_fd(fd) if key: ready.append((key, events & key.events)) return ready __all__.append('SelectSelector') if hasattr(select, "poll"): class PollSelector(BaseSelector): """ Poll-based selector """ def __init__(self): super(PollSelector, self).__init__() self._poll = select.poll() def register(self, fileobj, events, data=None): key = super(PollSelector, self).register(fileobj, events, data) event_mask = 0 if events & EVENT_READ: event_mask |= select.POLLIN if events & EVENT_WRITE: event_mask |= select.POLLOUT self._poll.register(key.fd, event_mask) return key def unregister(self, fileobj): key = super(PollSelector, self).unregister(fileobj) self._poll.unregister(key.fd) return key def _wrap_poll(self, timeout=None): """ Wrapper function for select.poll.poll() so that _syscall_wrapper can work with only seconds. """ if timeout is not None: if timeout <= 0: timeout = 0 else: # select.poll.poll() has a resolution of 1 millisecond, # round away from zero to wait *at least* timeout seconds. timeout = math.ceil(timeout * 1e3) result = self._poll.poll(timeout) return result def select(self, timeout=None): ready = [] fd_events = _syscall_wrapper(self._wrap_poll, True, timeout=timeout) for fd, event_mask in fd_events: events = 0 if event_mask & ~select.POLLIN: events |= EVENT_WRITE if event_mask & ~select.POLLOUT: events |= EVENT_READ key = self._key_from_fd(fd) if key: ready.append((key, events & key.events)) return ready __all__.append('PollSelector') if hasattr(select, "epoll"): class EpollSelector(BaseSelector): """ Epoll-based selector """ def __init__(self): super(EpollSelector, self).__init__() self._epoll = select.epoll() def fileno(self): return self._epoll.fileno() def register(self, fileobj, events, data=None): key = super(EpollSelector, self).register(fileobj, events, data) events_mask = 0 if events & EVENT_READ: events_mask |= select.EPOLLIN if events & EVENT_WRITE: events_mask |= select.EPOLLOUT _syscall_wrapper(self._epoll.register, False, key.fd, events_mask) return key def unregister(self, fileobj): key = super(EpollSelector, self).unregister(fileobj) try: _syscall_wrapper(self._epoll.unregister, False, key.fd) except SelectorError: # This can occur when the fd was closed since registry. pass return key def select(self, timeout=None): if timeout is not None: if timeout <= 0: timeout = 0.0 else: # select.epoll.poll() has a resolution of 1 millisecond # but luckily takes seconds so we don't need a wrapper # like PollSelector. Just for better rounding. timeout = math.ceil(timeout * 1e3) * 1e-3 timeout = float(timeout) else: timeout = -1.0 # epoll.poll() must have a float. # We always want at least 1 to ensure that select can be called # with no file descriptors registered. Otherwise will fail. 
max_events = max(len(self._fd_to_key), 1) ready = [] fd_events = _syscall_wrapper(self._epoll.poll, True, timeout=timeout, maxevents=max_events) for fd, event_mask in fd_events: events = 0 if event_mask & ~select.EPOLLIN: events |= EVENT_WRITE if event_mask & ~select.EPOLLOUT: events |= EVENT_READ key = self._key_from_fd(fd) if key: ready.append((key, events & key.events)) return ready def close(self): self._epoll.close() super(EpollSelector, self).close() __all__.append('EpollSelector') if hasattr(select, "devpoll"): class DevpollSelector(BaseSelector): """Solaris /dev/poll selector.""" def __init__(self): super(DevpollSelector, self).__init__() self._devpoll = select.devpoll() def fileno(self): return self._devpoll.fileno() def register(self, fileobj, events, data=None): key = super(DevpollSelector, self).register(fileobj, events, data) poll_events = 0 if events & EVENT_READ: poll_events |= select.POLLIN if events & EVENT_WRITE: poll_events |= select.POLLOUT self._devpoll.register(key.fd, poll_events) return key def unregister(self, fileobj): key = super(DevpollSelector, self).unregister(fileobj) self._devpoll.unregister(key.fd) return key def _wrap_poll(self, timeout=None): """ Wrapper function for select.poll.poll() so that _syscall_wrapper can work with only seconds. """ if timeout is not None: if timeout <= 0: timeout = 0 else: # select.devpoll.poll() has a resolution of 1 millisecond, # round away from zero to wait *at least* timeout seconds. timeout = math.ceil(timeout * 1e3) result = self._devpoll.poll(timeout) return result def select(self, timeout=None): ready = [] fd_events = _syscall_wrapper(self._wrap_poll, True, timeout=timeout) for fd, event_mask in fd_events: events = 0 if event_mask & ~select.POLLIN: events |= EVENT_WRITE if event_mask & ~select.POLLOUT: events |= EVENT_READ key = self._key_from_fd(fd) if key: ready.append((key, events & key.events)) return ready def close(self): self._devpoll.close() super(DevpollSelector, self).close() __all__.append('DevpollSelector') if hasattr(select, "kqueue"): class KqueueSelector(BaseSelector): """ Kqueue / Kevent-based selector """ def __init__(self): super(KqueueSelector, self).__init__() self._kqueue = select.kqueue() def fileno(self): return self._kqueue.fileno() def register(self, fileobj, events, data=None): key = super(KqueueSelector, self).register(fileobj, events, data) if events & EVENT_READ: kevent = select.kevent(key.fd, select.KQ_FILTER_READ, select.KQ_EV_ADD) _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0) if events & EVENT_WRITE: kevent = select.kevent(key.fd, select.KQ_FILTER_WRITE, select.KQ_EV_ADD) _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0) return key def unregister(self, fileobj): key = super(KqueueSelector, self).unregister(fileobj) if key.events & EVENT_READ: kevent = select.kevent(key.fd, select.KQ_FILTER_READ, select.KQ_EV_DELETE) try: _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0) except SelectorError: pass if key.events & EVENT_WRITE: kevent = select.kevent(key.fd, select.KQ_FILTER_WRITE, select.KQ_EV_DELETE) try: _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0) except SelectorError: pass return key def select(self, timeout=None): if timeout is not None: timeout = max(timeout, 0) max_events = len(self._fd_to_key) * 2 ready_fds = {} kevent_list = _syscall_wrapper(self._kqueue.control, True, None, max_events, timeout) for kevent in kevent_list: fd = kevent.ident event_mask = kevent.filter events = 0 if event_mask == select.KQ_FILTER_READ: events |= 
EVENT_READ if event_mask == select.KQ_FILTER_WRITE: events |= EVENT_WRITE key = self._key_from_fd(fd) if key: if key.fd not in ready_fds: ready_fds[key.fd] = (key, events & key.events) else: old_events = ready_fds[key.fd][1] ready_fds[key.fd] = (key, (events | old_events) & key.events) return list(ready_fds.values()) def close(self): self._kqueue.close() super(KqueueSelector, self).close() __all__.append('KqueueSelector') # Choose the best implementation, roughly: # kqueue == epoll == devpoll > poll > select. # select() also can't accept a FD > FD_SETSIZE (usually around 1024) if 'KqueueSelector' in globals(): # Platform-specific: Mac OS and BSD DefaultSelector = KqueueSelector elif 'DevpollSelector' in globals(): DefaultSelector = DevpollSelector elif 'EpollSelector' in globals(): # Platform-specific: Linux DefaultSelector = EpollSelector elif 'PollSelector' in globals(): # Platform-specific: Linux DefaultSelector = PollSelector elif 'SelectSelector' in globals(): # Platform-specific: Windows DefaultSelector = SelectSelector else: # Platform-specific: AppEngine def no_selector(_): raise ValueError("Platform does not have a selector") DefaultSelector = no_selector HAS_SELECT = False ansible-2.5.1/lib/ansible/compat/tests/0000755000000000000000000000000013265756221017713 5ustar rootroot00000000000000ansible-2.5.1/lib/ansible/compat/tests/__init__.py0000644000000000000000000000236313265756155022036 0ustar rootroot00000000000000# (c) 2014, Toshio Kuratomi # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type ''' This module contains things that are only needed for compat in the testsuites, not in ansible itself. If you are not installing the test suite, you can safely remove this subdirectory. ''' # # Compat for python2.7 # # One unittest needs to import builtins via __import__() so we need to have # the string that represents it try: import __builtin__ except ImportError: BUILTINS = 'builtins' else: BUILTINS = '__builtin__' ansible-2.5.1/lib/ansible/compat/tests/mock.py0000644000000000000000000001071413265756155021227 0ustar rootroot00000000000000# (c) 2014, Toshio Kuratomi # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
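# A short usage sketch for the selector API defined in _selectors2.py above:
# register one end of a pipe for read events and wait for it to become ready.
# Assumes a POSIX platform (os.pipe) and uses only names the package exports.
import os
from ansible.compat.selectors import DefaultSelector, EVENT_READ

r, w = os.pipe()
sel = DefaultSelector()  # kqueue/epoll/poll/select, depending on platform
sel.register(r, EVENT_READ, data='wakeup')
os.write(w, b'x')
for key, events in sel.select(timeout=1.0):
    assert events & EVENT_READ and key.data == 'wakeup'
sel.close()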
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

'''
Compat module for Python3.x's unittest.mock module
'''
import sys

# Python 2.7

# Note: Could use the pypi mock library on python3.x as well as python2.x. It
# is the same as the python3 stdlib mock library

try:
    # Allow wildcard import because we really do want to import all of mock's
    # symbols into this compat shim
    # pylint: disable=wildcard-import,unused-wildcard-import
    from unittest.mock import *
except ImportError:
    # Python 2
    # pylint: disable=wildcard-import,unused-wildcard-import
    try:
        from mock import *
    except ImportError:
        print('You need the mock library installed on python2.x to run tests')


# Prior to 3.4.4, mock_open cannot handle binary read_data
if sys.version_info >= (3,) and sys.version_info < (3, 4, 4):
    file_spec = None

    def _iterate_read_data(read_data):
        # Helper for mock_open:
        # Retrieve lines from read_data via a generator so that separate calls to
        # readline, read, and readlines are properly interleaved
        sep = b'\n' if isinstance(read_data, bytes) else '\n'
        data_as_list = [l + sep for l in read_data.split(sep)]

        if data_as_list[-1] == sep:
            # If the last line ended in a newline, the list comprehension will have an
            # extra entry that's just a newline. Remove this.
            data_as_list = data_as_list[:-1]
        else:
            # If there wasn't an extra newline by itself, then the file being
            # emulated doesn't have a newline to end the last line; remove the
            # newline that our naive format() added
            data_as_list[-1] = data_as_list[-1][:-1]

        for line in data_as_list:
            yield line

    def mock_open(mock=None, read_data=''):
        """
        A helper function to create a mock to replace the use of `open`. It works
        for `open` called directly or used as a context manager.

        The `mock` argument is the mock object to configure. If `None` (the
        default) then a `MagicMock` will be created for you, with the API limited
        to methods or attributes available on standard file handles.

        `read_data` is a string for the `read`, `readline`, and `readlines`
        methods of the file handle to return. This is an empty string by default. 
""" def _readlines_side_effect(*args, **kwargs): if handle.readlines.return_value is not None: return handle.readlines.return_value return list(_data) def _read_side_effect(*args, **kwargs): if handle.read.return_value is not None: return handle.read.return_value return type(read_data)().join(_data) def _readline_side_effect(): if handle.readline.return_value is not None: while True: yield handle.readline.return_value for line in _data: yield line global file_spec if file_spec is None: import _io file_spec = list(set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO)))) if mock is None: mock = MagicMock(name='open', spec=open) handle = MagicMock(spec=file_spec) handle.__enter__.return_value = handle _data = _iterate_read_data(read_data) handle.write.return_value = None handle.read.return_value = None handle.readline.return_value = None handle.readlines.return_value = None handle.read.side_effect = _read_side_effect handle.readline.side_effect = _readline_side_effect() handle.readlines.side_effect = _readlines_side_effect mock.return_value = handle return mock ansible-2.5.1/lib/ansible/compat/tests/unittest.py0000644000000000000000000000242613265756155022156 0ustar rootroot00000000000000# (c) 2014, Toshio Kuratomi # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type ''' Compat module for Python2.7's unittest module ''' import sys # Allow wildcard import because we really do want to import all of # unittests's symbols into this compat shim # pylint: disable=wildcard-import,unused-wildcard-import if sys.version_info < (2, 7): try: # Need unittest2 on python2.6 from unittest2 import * except ImportError: print('You need unittest2 installed on python2.6.x to run tests') else: from unittest import * ansible-2.5.1/lib/ansible/compat/__init__.py0000644000000000000000000000207713265756155020676 0ustar rootroot00000000000000# (c) 2014, Toshio Kuratomi # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type ''' Compat library for ansible. This contains compatibility definitions for older python When we need to import a module differently depending on python version, do it here. 
Then in the code we can simply import from compat in order to get what we want. ''' ansible-2.5.1/lib/ansible/config/0000755000000000000000000000000013265756221016533 5ustar rootroot00000000000000ansible-2.5.1/lib/ansible/config/__init__.py0000644000000000000000000000000013265756155020640 0ustar rootroot00000000000000ansible-2.5.1/lib/ansible/config/base.yml0000644000000000000000000016514713265756155020214 0ustar rootroot00000000000000# Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) --- ALLOW_WORLD_READABLE_TMPFILES: name: Allow world readable temporary files default: False description: - This makes the temporary files created on the machine to be world readable and will issue a warning instead of failing the task. - It is useful when becoming an unprivileged user. env: [] ini: - {key: allow_world_readable_tmpfiles, section: defaults} type: boolean yaml: {key: defaults.allow_world_readable_tmpfiles} version_added: "2.1" ANSIBLE_COW_SELECTION: name: Cowsay filter selection default: default description: This allows you to chose a specific cowsay stencil for the banners or use 'random' to cycle through them. env: [{name: ANSIBLE_COW_SELECTION}] ini: - {key: cow_selection, section: defaults} ANSIBLE_COW_WHITELIST: name: Cowsay filter whitelist default: ['bud-frogs', 'bunny', 'cheese', 'daemon', 'default', 'dragon', 'elephant-in-snake', 'elephant', 'eyes', 'hellokitty', 'kitty', 'luke-koala', 'meow', 'milk', 'moofasa', 'moose', 'ren', 'sheep', 'small', 'stegosaurus', 'stimpy', 'supermilker', 'three-eyes', 'turkey', 'turtle', 'tux', 'udder', 'vader-koala', 'vader', 'www'] description: White list of cowsay templates that are 'safe' to use, set to empty list if you want to enable all installed templates. env: [{name: ANSIBLE_COW_WHITELIST}] ini: - {key: cow_whitelist, section: defaults} type: list yaml: {key: display.cowsay_whitelist} ANSIBLE_FORCE_COLOR: name: Force color output default: False description: This options forces color mode even when running without a TTY or the "nocolor" setting is True. env: [{name: ANSIBLE_FORCE_COLOR}] ini: - {key: force_color, section: defaults} type: boolean yaml: {key: display.force_color} ANSIBLE_NOCOLOR: name: Suppress color output default: False description: This setting allows suppressing colorizing output, which is used to give a better indication of failure and status information. env: [{name: ANSIBLE_NOCOLOR}] ini: - {key: nocolor, section: defaults} type: boolean yaml: {key: display.nocolor} ANSIBLE_NOCOWS: name: Suppress cowsay output default: False description: If you have cowsay installed but want to avoid the 'cows' (why????), use this. env: [{name: ANSIBLE_NOCOWS}] ini: - {key: nocows, section: defaults} type: boolean yaml: {key: display.i_am_no_fun} ANSIBLE_COW_PATH: name: Set path to cowsay command default: null description: Specify a custom cowsay path or swap in your cowsay implementation of choice env: [{name: ANSIBLE_COW_PATH}] ini: - {key: cowpath, section: defaults} type: string yaml: {key: display.cowpath} ANSIBLE_PIPELINING: name: Connection pipelining default: False description: - Pipelining, if supported by the connection plugin, reduces the number of network operations required to execute a module on the remote server, by executing many Ansible modules without actual file transfer. - This can result in a very significant performance improvement when enabled. - "However this conflicts with privilege escalation (become). 
For example, when using 'sudo:' operations you must first disable 'requiretty' in /etc/sudoers on all managed hosts, which is why it is disabled by default." env: - name: ANSIBLE_PIPELINING - name: ANSIBLE_SSH_PIPELINING ini: - section: connection key: pipelining - section: ssh_connection key: pipelining type: boolean yaml: {key: plugins.connection.pipelining} ANSIBLE_SSH_ARGS: # TODO: move to ssh plugin default: -C -o ControlMaster=auto -o ControlPersist=60s description: - If set, this will override the Ansible default ssh arguments. - In particular, users may wish to raise the ControlPersist time to encourage performance. A value of 30 minutes may be appropriate. - Be aware that if `-o ControlPath` is set in ssh_args, the control path setting is not used. env: [{name: ANSIBLE_SSH_ARGS}] ini: - {key: ssh_args, section: ssh_connection} yaml: {key: ssh_connection.ssh_args} ANSIBLE_SSH_CONTROL_PATH: # TODO: move to ssh plugin default: null description: - This is the location to save ssh's ControlPath sockets; it uses ssh's variable substitution. - Since 2.3, if null, ansible will generate a unique hash. Use `%(directory)s` to indicate where to use the control dir path setting. - Before 2.3 it defaulted to `control_path=%(directory)s/ansible-ssh-%%h-%%p-%%r`. - Be aware that this setting is ignored if `-o ControlPath` is set in ssh args. env: [{name: ANSIBLE_SSH_CONTROL_PATH}] ini: - {key: control_path, section: ssh_connection} yaml: {key: ssh_connection.control_path} ANSIBLE_SSH_CONTROL_PATH_DIR: # TODO: move to ssh plugin default: ~/.ansible/cp description: - This sets the directory to use for ssh control path if the control path setting is null. - Also, provides the `%(directory)s` variable for the control path setting. env: [{name: ANSIBLE_SSH_CONTROL_PATH_DIR}] ini: - {key: control_path_dir, section: ssh_connection} yaml: {key: ssh_connection.control_path_dir} ANSIBLE_SSH_EXECUTABLE: # TODO: move to ssh plugin default: ssh description: - This defines the location of the ssh binary. It defaults to `ssh` which will use the first ssh binary available in $PATH. - This option is usually not required; it might be useful when access to system ssh is restricted, or when using ssh wrappers to connect to remote hosts. env: [{name: ANSIBLE_SSH_EXECUTABLE}] ini: - {key: ssh_executable, section: ssh_connection} yaml: {key: ssh_connection.ssh_executable} version_added: "2.2" ANSIBLE_SSH_RETRIES: # TODO: move to ssh plugin default: 0 description: Number of attempts to establish a connection before we give up and report the host as 'UNREACHABLE' env: [{name: ANSIBLE_SSH_RETRIES}] ini: - {key: retries, section: ssh_connection} type: integer yaml: {key: ssh_connection.retries} ANY_ERRORS_FATAL: name: Make Task failures fatal default: False description: Sets the default value for the any_errors_fatal keyword. If True, task failures will be considered fatal errors. env: - name: ANSIBLE_ANY_ERRORS_FATAL ini: - section: defaults key: any_errors_fatal type: boolean yaml: {key: errors.any_task_errors_fatal} version_added: "2.4" BECOME_ALLOW_SAME_USER: name: Allow becoming the same user default: False description: This setting controls if become is skipped when remote user and become user are the same, i.e. root sudo to root.
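# Illustrative note (not part of the original file): each definition in this
# file maps one setting to its ansible.cfg keys and environment variables.
# For example, the ANSIBLE_PIPELINING entry above can be enabled either via an
# ansible.cfg snippet:
#   [ssh_connection]
#   pipelining = True
# or, equivalently, via the shell environment:
#   export ANSIBLE_PIPELINING=True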
env: [{name: ANSIBLE_BECOME_ALLOW_SAME_USER}] ini: - {key: become_allow_same_user, section: privilege_escalation} type: boolean yaml: {key: privilege_escalation.become_allow_same_user} AGNOSTIC_BECOME_PROMPT: # TODO: Switch the default to True in either the Ansible 2.6 release or the 2.7 release, whichever happens after the Tower 3.3 release name: Display an agnostic become prompt default: False type: boolean description: Display an agnostic become prompt instead of displaying a prompt containing the command line supplied become method env: [{name: ANSIBLE_AGNOSTIC_BECOME_PROMPT}] ini: - {key: agnostic_become_prompt, section: privilege_escalation} yaml: {key: privilege_escalation.agnostic_become_prompt} version_added: "2.5" CACHE_PLUGIN: name: Persistent Cache plugin default: memory description: Chooses which cache plugin to use; the default 'memory' is ephemeral. env: [{name: ANSIBLE_CACHE_PLUGIN}] ini: - {key: fact_caching, section: defaults} yaml: {key: facts.cache.plugin} CACHE_PLUGIN_CONNECTION: name: Cache Plugin URI default: ~ description: Defines connection or path information for the cache plugin env: [{name: ANSIBLE_CACHE_PLUGIN_CONNECTION}] ini: - {key: fact_caching_connection, section: defaults} yaml: {key: facts.cache.uri} CACHE_PLUGIN_PREFIX: name: Cache Plugin table prefix default: ansible_facts description: Prefix to use for cache plugin files/tables env: [{name: ANSIBLE_CACHE_PLUGIN_PREFIX}] ini: - {key: fact_caching_prefix, section: defaults} yaml: {key: facts.cache.prefix} CACHE_PLUGIN_TIMEOUT: name: Cache Plugin expiration timeout default: 86400 description: Expiration timeout for the cache plugin data env: [{name: ANSIBLE_CACHE_PLUGIN_TIMEOUT}] ini: - {key: fact_caching_timeout, section: defaults} type: integer yaml: {key: facts.cache.timeout} COLOR_CHANGED: name: Color for 'changed' task status default: yellow description: Defines the color to use on 'Changed' task status env: [{name: ANSIBLE_COLOR_CHANGED}] ini: - {key: changed, section: colors} yaml: {key: display.colors.changed} COLOR_DEBUG: name: Color for debug statements default: dark gray description: Defines the color to use when emitting debug messages env: [{name: ANSIBLE_COLOR_DEBUG}] ini: - {key: debug, section: colors} yaml: {key: display.colors.debug} COLOR_DEPRECATE: name: Color for deprecation messages default: purple description: Defines the color to use when emitting deprecation messages env: [{name: ANSIBLE_COLOR_DEPRECATE}] ini: - {key: deprecate, section: colors} yaml: {key: display.colors.deprecate} COLOR_DIFF_ADD: name: Color for diff added display default: green description: Defines the color to use when showing added lines in diffs env: [{name: ANSIBLE_COLOR_DIFF_ADD}] ini: - {key: diff_add, section: colors} yaml: {key: display.colors.diff.add} COLOR_DIFF_LINES: name: Color for diff lines display default: cyan description: Defines the color to use when showing diffs env: [{name: ANSIBLE_COLOR_DIFF_LINES}] ini: - {key: diff_lines, section: colors} COLOR_DIFF_REMOVE: name: Color for diff removed display default: red description: Defines the color to use when showing removed lines in diffs env: [{name: ANSIBLE_COLOR_DIFF_REMOVE}] ini: - {key: diff_remove, section: colors} COLOR_ERROR: name: Color for error messages default: red description: Defines the color to use when emitting error messages env: [{name: ANSIBLE_COLOR_ERROR}] ini: - {key: error, section: colors} yaml: {key: colors.error} COLOR_HIGHLIGHT: name: Color for highlighting default: white description: Defines the color to use for
highlighting env: [{name: ANSIBLE_COLOR_HIGHLIGHT}] ini: - {key: highlight, section: colors} COLOR_OK: name: Color for 'ok' task status default: green description: Defines the color to use when showing 'OK' task status env: [{name: ANSIBLE_COLOR_OK}] ini: - {key: ok, section: colors} COLOR_SKIP: name: Color for 'skip' task status default: cyan description: Defines the color to use when showing 'Skipped' task status env: [{name: ANSIBLE_COLOR_SKIP}] ini: - {key: skip, section: colors} COLOR_UNREACHABLE: name: Color for 'unreachable' host state default: bright red description: Defines the color to use on 'Unreachable' status env: [{name: ANSIBLE_COLOR_UNREACHABLE}] ini: - {key: unreachable, section: colors} COLOR_VERBOSE: name: Color for verbose messages default: blue description: Defines the color to use when emitting verbose messages, i.e. those that show with '-v's. env: [{name: ANSIBLE_COLOR_VERBOSE}] ini: - {key: verbose, section: colors} COLOR_WARN: name: Color for warning messages default: bright purple description: Defines the color to use when emitting warning messages env: [{name: ANSIBLE_COLOR_WARN}] ini: - {key: warn, section: colors} ACTION_WARNINGS: name: Toggle action warnings default: True description: - By default Ansible will display warnings received from a task action (module or action plugin). - These warnings can be silenced by adjusting this setting to False. env: [{name: ANSIBLE_ACTION_WARNINGS}] ini: - {key: action_warnings, section: defaults} type: boolean version_added: "2.5" COMMAND_WARNINGS: name: Command module warnings default: True description: - By default Ansible will issue a warning when the shell or command module is used and the command appears to be similar to an existing Ansible module. - These warnings can be silenced by adjusting this setting to False. You can also control this at the task level with the module option ``warn``. env: [{name: ANSIBLE_COMMAND_WARNINGS}] ini: - {key: command_warnings, section: defaults} type: boolean version_added: "1.8" DEFAULT_ACTION_PLUGIN_PATH: name: Action plugins path default: ~/.ansible/plugins/action:/usr/share/ansible/plugins/action description: Colon separated paths in which Ansible will search for Action Plugins. env: [{name: ANSIBLE_ACTION_PLUGINS}] ini: - {key: action_plugins, section: defaults} type: pathspec yaml: {key: plugins.action.path} DEFAULT_ALLOW_UNSAFE_LOOKUPS: name: Allow unsafe lookups default: False description: - "When enabled, this option allows lookup plugins (whether used in variables as ``{{lookup('foo')}}`` or as a loop as with_foo) to return data that is not marked 'unsafe'." - By default, such data is marked as unsafe to prevent the templating engine from evaluating any jinja2 templating language, as this could represent a security risk. This option is provided to allow for backwards-compatibility, however users should first consider adding allow_unsafe=True to any lookups which may be expected to contain data which may be run through the templating engine late env: [] ini: - {key: allow_unsafe_lookups, section: defaults} type: boolean version_added: "2.2.3" DEFAULT_ASK_PASS: name: Ask for the login password default: False description: - This controls whether an Ansible playbook should prompt for a login password. If using SSH keys for authentication, you probably do not need to change this setting.
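# Illustrative equivalent (playbook name assumed): instead of enabling
# ask_pass globally, the same prompt can be requested per run with the
# --ask-pass (-k) command line option:
#   ansible-playbook site.yml --ask-pass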
env: [{name: ANSIBLE_ASK_PASS}] ini: - {key: ask_pass, section: defaults} type: boolean yaml: {key: defaults.ask_pass} DEFAULT_ASK_SUDO_PASS: name: Ask for the sudo password default: False deprecated: why: In favor of Ansible Become, which is a generic framework. See become_ask_pass. version: "2.8" alternatives: become description: - This controls whether an Ansible playbook should prompt for a sudo password. env: [{name: ANSIBLE_ASK_SUDO_PASS}] ini: - {key: ask_sudo_pass, section: defaults} type: boolean DEFAULT_ASK_SU_PASS: name: Ask for the su password default: False deprecated: why: In favor of Ansible Become, which is a generic framework. See become_ask_pass. version: "2.8" alternatives: become description: - This controls whether an Ansible playbook should prompt for a su password. env: [{name: ANSIBLE_ASK_SU_PASS}] ini: - {key: ask_su_pass, section: defaults} type: boolean DEFAULT_ASK_VAULT_PASS: name: Ask for the vault password(s) default: False description: - This controls whether an Ansible playbook should prompt for a vault password. env: [{name: ANSIBLE_ASK_VAULT_PASS}] ini: - {key: ask_vault_pass, section: defaults} type: boolean DEFAULT_BECOME: name: Enable privilege escalation (become) default: False description: Toggles the use of privilege escalation, allowing you to 'become' another user after login. env: [{name: ANSIBLE_BECOME}] ini: - {key: become, section: privilege_escalation} type: boolean DEFAULT_BECOME_ASK_PASS: name: Ask for the privilege escalation (become) password default: False description: Toggle to prompt for the privilege escalation password. env: [{name: ANSIBLE_BECOME_ASK_PASS}] ini: - {key: become_ask_pass, section: privilege_escalation} type: boolean DEFAULT_BECOME_METHOD: name: Choose privilege escalation method default: 'sudo' description: Privilege escalation method to use when `become` is enabled. env: [{name: ANSIBLE_BECOME_METHOD}] ini: - {section: privilege_escalation, key: become_method} DEFAULT_BECOME_EXE: name: Choose 'become' executable default: ~ description: 'executable to use for privilege escalation, otherwise Ansible will depend on PATH' env: [{name: ANSIBLE_BECOME_EXE}] ini: - {key: become_exe, section: privilege_escalation} DEFAULT_BECOME_FLAGS: name: Set 'become' executable options default: '' description: Flags to pass to the privilege escalation executable. env: [{name: ANSIBLE_BECOME_FLAGS}] ini: - {key: become_flags, section: privilege_escalation} DEFAULT_BECOME_USER: # FIXME: should really be blank and make -u passing optional depending on it name: Set the user you 'become' via privilege escalation default: root description: The user your login/remote user 'becomes' when using privilege escalation; most systems will use 'root' when no user is specified. env: [{name: ANSIBLE_BECOME_USER}] ini: - {key: become_user, section: privilege_escalation} yaml: {key: become.user} DEFAULT_CACHE_PLUGIN_PATH: name: Cache Plugins Path default: ~/.ansible/plugins/cache:/usr/share/ansible/plugins/cache description: Colon separated paths in which Ansible will search for Cache Plugins.
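# Illustrative ansible.cfg snippet (not part of the original file) showing
# the colon separated pathspec format used by the plugin path settings here;
# paths are the documented defaults:
#   [defaults]
#   cache_plugins = ~/.ansible/plugins/cache:/usr/share/ansible/plugins/cache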
env: [{name: ANSIBLE_CACHE_PLUGINS}] ini: - {key: cache_plugins, section: defaults} type: pathspec DEFAULT_CALLABLE_WHITELIST: name: Template 'callable' whitelist default: [] description: Whitelist of callable methods to be made available to template evaluation env: [{name: ANSIBLE_CALLABLE_WHITELIST}] ini: - {key: callable_whitelist, section: defaults} type: list DEFAULT_CALLBACK_PLUGIN_PATH: name: Callback Plugins Path default: ~/.ansible/plugins/callback:/usr/share/ansible/plugins/callback description: Colon separated paths in which Ansible will search for Callback Plugins. env: [{name: ANSIBLE_CALLBACK_PLUGINS}] ini: - {key: callback_plugins, section: defaults} type: pathspec yaml: {key: plugins.callback.path} DEFAULT_CALLBACK_WHITELIST: name: Callback Whitelist default: [] description: - "List of whitelisted callbacks, not all callbacks need whitelisting, but many of those shipped with Ansible do as we don't want them activated by default." env: [{name: ANSIBLE_CALLBACK_WHITELIST}] ini: - {key: callback_whitelist, section: defaults} type: list yaml: {key: plugins.callback.whitelist} DEFAULT_CONNECTION_PLUGIN_PATH: name: Connection Plugins Path default: ~/.ansible/plugins/connection:/usr/share/ansible/plugins/connection description: Colon separated paths in which Ansible will search for Connection Plugins. env: [{name: ANSIBLE_CONNECTION_PLUGINS}] ini: - {key: connection_plugins, section: defaults} type: pathspec yaml: {key: plugins.connection.path} DEFAULT_DEBUG: name: Debug mode default: False description: Toggles debug output in Ansible, VERY verbose and can hinder multiprocessing. env: [{name: ANSIBLE_DEBUG}] ini: - {key: debug, section: defaults} type: boolean DEFAULT_EXECUTABLE: name: Target shell executable default: /bin/sh description: - "This indicates the command to use to spawn a shell under for Ansible's execution needs on a target. Users may need to change this in rare instances when shell usage is constrained, but in most cases it may be left as is." env: [{name: ANSIBLE_EXECUTABLE}] ini: - {key: executable, section: defaults} DEFAULT_FACT_PATH: name: local fact path default: ~ description: - "This option allows you to globally configure a custom path for 'local_facts' for the implied M(setup) task when using fact gathering." - "If not set, it will fallback to the default from the M(setup) module: ``/etc/ansible/facts.d``." - "This does **not** affect user defined tasks that use the M(setup) module." env: [{name: ANSIBLE_FACT_PATH}] ini: - {key: fact_path, section: defaults} type: path yaml: {key: facts.gathering.fact_path} DEFAULT_FILTER_PLUGIN_PATH: name: Jinja2 Filter Plugins Path default: ~/.ansible/plugins/filter:/usr/share/ansible/plugins/filter description: Colon separated paths in which Ansible will search for Jinja2 Filter Plugins. env: [{name: ANSIBLE_FILTER_PLUGINS}] ini: - {key: filter_plugins, section: defaults} type: pathspec DEFAULT_FORCE_HANDLERS: name: Force handlers to run after failure default: False description: - This option controls if notified handlers run on a host even if a failure occurs on that host. - When false, the handlers will not run if a failure has occurred on a host. - This can also be set per play or on the command line. See Handlers and Failure for more details. env: [{name: ANSIBLE_FORCE_HANDLERS}] ini: - {key: force_handlers, section: defaults} type: boolean version_added: "1.9.1" DEFAULT_FORKS: name: Number of task forks default: 5 description: Maximum number of forks Ansible will use to execute tasks on target hosts. 
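# Illustrative ways (playbook name assumed) to raise the fork count from the
# default of 5, either in ansible.cfg:
#   [defaults]
#   forks = 20
# or per run on the command line:
#   ansible-playbook site.yml --forks 20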
env: [{name: ANSIBLE_FORKS}] ini: - {key: forks, section: defaults} type: integer DEFAULT_GATHERING: name: Gathering behaviour default: 'implicit' description: - This setting controls the default policy of fact gathering (facts discovered about remote systems). - "When 'implicit' (the default), the cache plugin will be ignored and facts will be gathered per play unless 'gather_facts: False' is set." - "When 'explicit' the inverse is true, facts will not be gathered unless directly requested in the play." - "The 'smart' value means each new host that has no facts discovered will be scanned, but if the same host is addressed in multiple plays it will not be contacted again in the playbook run." - "This option can be useful for those wishing to save fact gathering time. Both 'smart' and 'explicit' will use the cache plugin." env: [{name: ANSIBLE_GATHERING}] ini: - key: gathering section: defaults version_added: "1.6" choices: ['smart', 'explicit', 'implicit'] DEFAULT_GATHER_SUBSET: name: Gather facts subset default: 'all' description: - Set the `gather_subset` option for the M(setup) task in the implicit fact gathering. See the module documentation for specifics. - "It does **not** apply to user defined M(setup) tasks." env: [{name: ANSIBLE_GATHER_SUBSET}] ini: - key: gather_subset section: defaults version_added: "2.1" DEFAULT_GATHER_TIMEOUT: name: Gather facts timeout default: 10 description: - Set the timeout in seconds for the implicit fact gathering. - "It does **not** apply to user defined M(setup) tasks." env: [{name: ANSIBLE_GATHER_TIMEOUT}] ini: - {key: gather_timeout, section: defaults} type: integer yaml: {key: defaults.gather_timeout} DEFAULT_HANDLER_INCLUDES_STATIC: name: Make handler M(include) static default: False description: - "Since 2.0 M(include) can be 'dynamic', this setting (if True) forces that if the include appears in a ``handlers`` section to be 'static'." env: [{name: ANSIBLE_HANDLER_INCLUDES_STATIC}] ini: - {key: handler_includes_static, section: defaults} type: boolean deprecated: why: include itself is deprecated and this setting will not matter in the future version: "2.8" alternatives: none as its already built into the decision between include_tasks and import_tasks DEFAULT_HASH_BEHAVIOUR: name: Hash merge behaviour default: replace type: string choices: ["replace", "merge"] description: - This setting controls how variables merge in Ansible. By default Ansible will override variables in specific precedence orders, as described in Variables. When a variable of higher precedence wins, it will replace the other value. - "Some users prefer that variables that are hashes (aka 'dictionaries' in Python terms) are merged. This setting is called 'merge'. This is not the default behavior and it does not affect variables whose values are scalars (integers, strings) or arrays. We generally recommend not using this setting unless you think you have an absolute need for it, and playbooks in the official examples repos do not use this setting" - In version 2.0 a ``combine`` filter was added to allow doing this for a particular variable (described in Filters). 
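# Worked example of the two behaviours (variable values assumed): given a
# lower precedence var config: {a: 1, b: 2} and a higher precedence var
# config: {b: 3},
#   replace (default) yields config == {b: 3}
#   merge             yields config == {a: 1, b: 3}
# The per-variable alternative mentioned above uses the ``combine`` filter:
#   {{ {'a': 1, 'b': 2} | combine({'b': 3}) }}  ->  {'a': 1, 'b': 3}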
env: [{name: ANSIBLE_HASH_BEHAVIOUR}] ini: - {key: hash_behaviour, section: defaults} DEFAULT_HOST_LIST: name: Inventory Source default: /etc/ansible/hosts description: Colon separated list of Ansible inventory sources env: - name: ANSIBLE_HOSTS deprecated: why: The variable is misleading as it can be a list of hosts and/or paths to inventory sources version: "2.8" alternatives: ANSIBLE_INVENTORY - name: ANSIBLE_INVENTORY expand_relative_paths: True ini: - key: hostfile section: defaults deprecated: why: The key is misleading as it can also be a list of hosts, a directory or a list of paths version: "2.8" alternatives: "[defaults]\ninventory=/path/to/file|dir" - key: inventory section: defaults type: pathlist yaml: {key: defaults.inventory} DEFAULT_INTERNAL_POLL_INTERVAL: name: Internal poll interval default: 0.001 env: [] ini: - {key: internal_poll_interval, section: defaults} type: float version_added: "2.2" description: - This sets the interval (in seconds) of Ansible internal processes polling each other. Lower values improve performance with large playbooks at the expense of extra CPU load. Higher values are more suitable for Ansible usage in automation scenarios, when UI responsiveness is not required but CPU usage might be a concern. - "The default corresponds to the value hardcoded in Ansible <= 2.1" DEFAULT_INVENTORY_PLUGIN_PATH: name: Inventory Plugins Path default: ~/.ansible/plugins/inventory:/usr/share/ansible/plugins/inventory description: Colon separated paths in which Ansible will search for Inventory Plugins. env: [{name: ANSIBLE_INVENTORY_PLUGINS}] ini: - {key: inventory_plugins, section: defaults} type: pathspec DEFAULT_JINJA2_EXTENSIONS: name: Enabled Jinja2 extensions default: [] description: - This is a developer-specific feature that allows enabling additional Jinja2 extensions. - "See the Jinja2 documentation for details. If you do not know what these do, you probably don't need to change this setting :)" env: [{name: ANSIBLE_JINJA2_EXTENSIONS}] ini: - {key: jinja2_extensions, section: defaults} DEFAULT_KEEP_REMOTE_FILES: name: Keep remote files default: False description: Enables/disables the cleaning up of the temporary files Ansible used to execute the tasks on the remote. env: [{name: ANSIBLE_KEEP_REMOTE_FILES}] ini: - {key: keep_remote_files, section: defaults} type: boolean DEFAULT_LIBVIRT_LXC_NOSECLABEL: # TODO: move to plugin name: No security label on Lxc default: False description: - "This setting causes libvirt to connect to lxc containers by passing --noseclabel to virsh. This is necessary when running on systems which do not have SELinux." env: [{name: LIBVIRT_LXC_NOSECLABEL}] ini: - {key: libvirt_lxc_noseclabel, section: selinux} type: boolean version_added: "2.1" DEFAULT_LOAD_CALLBACK_PLUGINS: name: Load callbacks for adhoc default: False description: - Controls whether callback plugins are loaded when running /usr/bin/ansible. This may be used to log activity from the command line, send notifications, and so on. Callback plugins are always loaded for ``ansible-playbook``. env: [{name: ANSIBLE_LOAD_CALLBACK_PLUGINS}] ini: - {key: bin_ansible_callbacks, section: defaults} type: boolean version_added: "1.8" DEFAULT_LOCAL_TMP: name: Controller temporary directory default: ~/.ansible/tmp description: Temporary directory for Ansible to use on the controller. 
env: [{name: ANSIBLE_LOCAL_TEMP}] ini: - {key: local_tmp, section: defaults} type: tmppath DEFAULT_LOG_PATH: name: Ansible log file path default: '' description: File to which Ansible will log on the controller. When empty, logging is disabled. env: [{name: ANSIBLE_LOG_PATH}] ini: - {key: log_path, section: defaults} type: path DEFAULT_LOG_FILTER: name: Name filters for python logger default: [] description: List of logger names to filter out of the log file env: [{name: ANSIBLE_LOG_FILTER}] ini: - {key: log_filter, section: defaults} type: list DEFAULT_LOOKUP_PLUGIN_PATH: name: Lookup Plugins Path description: Colon separated paths in which Ansible will search for Lookup Plugins. default: ~/.ansible/plugins/lookup:/usr/share/ansible/plugins/lookup env: [{name: ANSIBLE_LOOKUP_PLUGINS}] ini: - {key: lookup_plugins, section: defaults} type: pathspec yaml: {key: defaults.lookup_plugins} DEFAULT_MANAGED_STR: name: Ansible managed default: 'Ansible managed' description: Sets the macro for the 'ansible_managed' variable available for M(template) tasks. env: [] ini: - {key: ansible_managed, section: defaults} yaml: {key: defaults.ansible_managed} DEFAULT_MODULE_ARGS: name: Adhoc default arguments default: '' description: - This sets the default arguments to pass to the ``ansible`` adhoc binary if no ``-a`` is specified. env: [{name: ANSIBLE_MODULE_ARGS}] ini: - {key: module_args, section: defaults} DEFAULT_MODULE_COMPRESSION: name: Python module compression default: ZIP_DEFLATED description: Compression scheme to use when transferring Python modules to the target. env: [] ini: - {key: module_compression, section: defaults} # vars: # - name: ansible_module_compression DEFAULT_MODULE_LANG: name: Target language environment default: "{{ CONTROLLER_LANG }}" description: - "Language locale setting to use for modules when they execute on the target." - "If empty it tries to set itself to the LANG environment variable on the controller." - "This is only used if DEFAULT_MODULE_SET_LOCALE is set to true" env: [{name: ANSIBLE_MODULE_LANG}] ini: - {key: module_lang, section: defaults} deprecated: why: Modules are coded to set their own locale if needed for screenscraping version: "2.9" DEFAULT_MODULE_NAME: name: Default adhoc module default: command description: "Module to use with the ``ansible`` AdHoc command, if none is specified via ``-m``." env: [] ini: - {key: module_name, section: defaults} DEFAULT_MODULE_PATH: name: Modules Path description: Colon separated paths in which Ansible will search for Modules. default: ~/.ansible/plugins/modules:/usr/share/ansible/plugins/modules env: [{name: ANSIBLE_LIBRARY}] ini: - {key: library, section: defaults} type: pathspec DEFAULT_MODULE_SET_LOCALE: name: Target locale default: False description: - Controls if we set locale for modules when executing on the target. env: [{name: ANSIBLE_MODULE_SET_LOCALE}] ini: - {key: module_set_locale, section: defaults} type: boolean deprecated: why: Modules are coded to set their own locale if needed for screenscraping version: "2.9" DEFAULT_MODULE_UTILS_PATH: name: Module Utils Path description: Colon separated paths in which Ansible will search for Module utils files, which are shared by modules. default: ~/.ansible/plugins/module_utils:/usr/share/ansible/plugins/module_utils env: [{name: ANSIBLE_MODULE_UTILS}] ini: - {key: module_utils, section: defaults} type: pathspec DEFAULT_NO_LOG: name: No log default: False description: "Toggle Ansible's display and logging of task details, mainly used to avoid security disclosures."
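# Illustrative task level use of the same switch (task name, command path and
# variable assumed); no_log can be applied to a single task instead of
# globally:
#   - name: set service password
#     command: /usr/bin/set_password {{ service_password }}
#     no_log: True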
env: [{name: ANSIBLE_NO_LOG}] ini: - {key: no_log, section: defaults} type: boolean DEFAULT_NO_TARGET_SYSLOG: name: No syslog on target default: False description: Toggle Ansible logging to syslog on the target when it executes tasks. env: [{name: ANSIBLE_NO_TARGET_SYSLOG}] ini: - {key: no_target_syslog, section: defaults} type: boolean yaml: {key: defaults.no_target_syslog} DEFAULT_NULL_REPRESENTATION: name: Represent a null default: ~ description: What templating should return as a 'null' value. When not set it will let Jinja2 decide. env: [{name: ANSIBLE_NULL_REPRESENTATION}] ini: - {key: null_representation, section: defaults} type: none DEFAULT_POLL_INTERVAL: name: Async poll interval default: 15 description: - For asynchronous tasks in Ansible (covered in Asynchronous Actions and Polling), this is how often to check back on the status of those tasks when an explicit poll interval is not supplied. The default is a reasonably moderate 15 seconds which is a tradeoff between checking in frequently and providing a quick turnaround when something may have completed. env: [{name: ANSIBLE_POLL_INTERVAL}] ini: - {key: poll_interval, section: defaults} type: integer DEFAULT_PRIVATE_KEY_FILE: name: Private key file default: ~ description: - Option for connections using a certificate or key file to authenticate, rather than an agent or passwords; you can set the default value here to avoid re-specifying --private-key with every invocation. env: [{name: ANSIBLE_PRIVATE_KEY_FILE}] ini: - {key: private_key_file, section: defaults} type: path DEFAULT_PRIVATE_ROLE_VARS: name: Private role variables default: False description: - Makes role variables inaccessible from other roles. - This was introduced as a way to reset role variables to default values if a role is used more than once in a playbook. env: [{name: ANSIBLE_PRIVATE_ROLE_VARS}] ini: - {key: private_role_vars, section: defaults} type: boolean yaml: {key: defaults.private_role_vars} DEFAULT_REMOTE_PORT: name: Remote port default: ~ description: Port to use in remote connections; when blank it will use the connection plugin default. env: [{name: ANSIBLE_REMOTE_PORT}] ini: - {key: remote_port, section: defaults} type: integer yaml: {key: defaults.remote_port} DEFAULT_REMOTE_USER: name: Login/Remote User default: description: - Sets the login user for the target machines - "When blank it uses the connection plugin's default, normally the user currently executing Ansible." env: [{name: ANSIBLE_REMOTE_USER}] ini: - {key: remote_user, section: defaults} DEFAULT_ROLES_PATH: name: Roles path default: ~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles description: Colon separated paths in which Ansible will search for Roles. env: [{name: ANSIBLE_ROLES_PATH}] expand_relative_paths: True ini: - {key: roles_path, section: defaults} type: pathspec yaml: {key: defaults.roles_path} DEFAULT_SCP_IF_SSH: # TODO: move to ssh plugin default: smart description: - "Preferred method to use when transferring files over ssh" - When set to smart, Ansible will try them until one succeeds or they all fail - If set to True, it will force 'scp', if False it will use 'sftp' env: [{name: ANSIBLE_SCP_IF_SSH}] ini: - {key: scp_if_ssh, section: ssh_connection} DEFAULT_SELINUX_SPECIAL_FS: name: Problematic file systems default: fuse, nfs, vboxsf, ramfs, 9p description: - "Some filesystems do not support safe operations and/or return inconsistent errors, this setting makes Ansible 'tolerate' those in the list w/o causing fatal errors."
- Data corruption may occur and writes are not always verified when a filesystem is in the list. env: [] ini: - {key: special_context_filesystems, section: selinux} type: list DEFAULT_SFTP_BATCH_MODE: # TODO: move to ssh plugin default: True description: 'TODO: write it' env: [{name: ANSIBLE_SFTP_BATCH_MODE}] ini: - {key: sftp_batch_mode, section: ssh_connection} type: boolean yaml: {key: ssh_connection.sftp_batch_mode} DEFAULT_SQUASH_ACTIONS: name: Squashable actions default: apk, apt, dnf, homebrew, openbsd_pkg, pacman, pkgng, yum, zypper description: - Ansible can optimise actions that call modules that support list parameters when using ``with_`` looping. Instead of calling the module once for each item, the module is called once with the full list. - The default value for this setting is only for certain package managers, but it can be used for any module - Currently, this is only supported for modules that have a name or pkg parameter, and only when the item is the only thing being passed to the parameter. env: [{name: ANSIBLE_SQUASH_ACTIONS}] ini: - {key: squash_actions, section: defaults} type: list version_added: "2.0" DEFAULT_SSH_TRANSFER_METHOD: # TODO: move to ssh plugin default: description: 'unused?' # - "Preferred method to use when transferring files over ssh" # - Setting to smart will try them until one succeeds or they all fail #choices: ['sftp', 'scp', 'dd', 'smart'] env: [{name: ANSIBLE_SSH_TRANSFER_METHOD}] ini: - {key: transfer_method, section: ssh_connection} DEFAULT_STDOUT_CALLBACK: name: Main display callback plugin default: default description: - "Set the main callback used to display Ansible output, you can only have one at a time." - You can have many other callbacks, but just one can be in charge of stdout. env: [{name: ANSIBLE_STDOUT_CALLBACK}] ini: - {key: stdout_callback, section: defaults} ENABLE_TASK_DEBUGGER: name: Whether to enable the task debugger default: False description: - Whether or not to enable the task debugger; this previously was done as a strategy plugin. - Now all strategy plugins can inherit this behavior. The debugger defaults to activating when a task is failed on unreachable. Use the debugger keyword for more flexibility. type: boolean env: [{name: ANSIBLE_ENABLE_TASK_DEBUGGER}] ini: - {key: enable_task_debugger, section: defaults} version_added: "2.5" DEFAULT_STRATEGY: name: Implied strategy default: 'linear' description: Set the default strategy used for plays. env: [{name: ANSIBLE_STRATEGY}] ini: - {key: strategy, section: defaults} version_added: "2.3" DEFAULT_STRATEGY_PLUGIN_PATH: name: Strategy Plugins Path description: Colon separated paths in which Ansible will search for Strategy Plugins. default: ~/.ansible/plugins/strategy:/usr/share/ansible/plugins/strategy env: [{name: ANSIBLE_STRATEGY_PLUGINS}] ini: - {key: strategy_plugins, section: defaults} type: pathspec DEFAULT_SU: default: False description: 'Toggle the use of "su" for tasks.' env: [{name: ANSIBLE_SU}] ini: - {key: su, section: defaults} type: boolean yaml: {key: defaults.su} DEFAULT_SUDO: default: False deprecated: why: In favor of Ansible Become, which is a generic framework version: "2.8" alternatives: become description: 'Toggle the use of "sudo" for tasks.' env: [{name: ANSIBLE_SUDO}] ini: - {key: sudo, section: defaults} type: boolean DEFAULT_SUDO_EXE: name: sudo executable default: sudo deprecated: why: In favor of Ansible Become, which is a generic framework. See become_exe.
version: "2.8" alternatives: become description: 'specify an "sudo" executable, otherwise it relies on PATH.' env: [{name: ANSIBLE_SUDO_EXE}] ini: - {key: sudo_exe, section: defaults} DEFAULT_SUDO_FLAGS: name: sudo flags default: '-H -S -n' deprecated: why: In favor of Ansible Become, which is a generic framework. See become_flags. version: "2.8" alternatives: become description: 'Flags to pass to "sudo"' env: [{name: ANSIBLE_SUDO_FLAGS}] ini: - {key: sudo_flags, section: defaults} DEFAULT_SUDO_USER: name: sudo user default: deprecated: why: In favor of Ansible Become, which is a generic framework. See become_user. version: "2.8" alternatives: become description: 'User you become when using "sudo", leaving it blank will use the default configured on the target (normally root)' env: [{name: ANSIBLE_SUDO_USER}] ini: - {key: sudo_user, section: defaults} DEFAULT_SU_EXE: name: su executable default: su deprecated: why: In favor of Ansible Become, which is a generic framework. See become_exe. version: "2.8" alternatives: become description: 'specify an "su" executable, otherwise it relies on PATH.' env: [{name: ANSIBLE_SU_EXE}] ini: - {key: su_exe, section: defaults} DEFAULT_SU_FLAGS: name: su flags default: '' deprecated: why: In favor of Ansible Become, which is a generic framework. See become_flags. version: "2.8" alternatives: become description: 'Flags to pass to su' env: [{name: ANSIBLE_SU_FLAGS}] ini: - {key: su_flags, section: defaults} DEFAULT_SU_USER: name: su user default: description: 'User you become when using "su", leaving it blank will use the default configured on the target (normally root)' env: [{name: ANSIBLE_SU_USER}] ini: - {key: su_user, section: defaults} deprecated: why: In favor of Ansible Become, which is a generic framework. See become_user. version: "2.8" alternatives: become DEFAULT_SYSLOG_FACILITY: name: syslog facility default: LOG_USER description: Syslog facility to use when Ansible logs to the remote target env: [{name: ANSIBLE_SYSLOG_FACILITY}] ini: - {key: syslog_facility, section: defaults} DEFAULT_TASK_INCLUDES_STATIC: name: Task include static default: False description: - The `include` tasks can be static or dynamic, this toggles the default expected behaviour if autodetection fails and it is not explicitly set in task. env: [{name: ANSIBLE_TASK_INCLUDES_STATIC}] ini: - {key: task_includes_static, section: defaults} type: boolean version_added: "2.1" deprecated: why: include itself is deprecated and this setting will not matter in the future version: "2.8" alternatives: None, as its already built into the decision between include_tasks and import_tasks DEFAULT_TEST_PLUGIN_PATH: name: Jinja2 Test Plugins Path description: Colon separated paths in which Ansible will search for Jinja2 Test Plugins. default: ~/.ansible/plugins/test:/usr/share/ansible/plugins/test env: [{name: ANSIBLE_TEST_PLUGINS}] ini: - {key: test_plugins, section: defaults} type: pathspec DEFAULT_TIMEOUT: name: Connection timeout default: 10 description: This is the default timeout for connection plugins to use. 
env: [{name: ANSIBLE_TIMEOUT}] ini: - {key: timeout, section: defaults} type: integer DEFAULT_TRANSPORT: name: Connection plugin default: smart description: "Default connection plugin to use, the 'smart' option will toggle between 'ssh' and 'paramiko' depending on controller OS and ssh versions" env: [{name: ANSIBLE_TRANSPORT}] ini: - {key: transport, section: defaults} DEFAULT_UNDEFINED_VAR_BEHAVIOR: name: Jinja2 fail on undefined default: True version_added: "1.3" description: - When True, this causes ansible templating to fail steps that reference variable names that are likely typoed. - "Otherwise, any '{{ template_expression }}' that contains undefined variables will be rendered in a template or ansible action line exactly as written." env: [{name: ANSIBLE_ERROR_ON_UNDEFINED_VARS}] ini: - {key: error_on_undefined_vars, section: defaults} type: boolean DEFAULT_VARS_PLUGIN_PATH: name: Vars Plugins Path default: ~/.ansible/plugins/vars:/usr/share/ansible/plugins/vars description: Colon separated paths in which Ansible will search for Vars Plugins. env: [{name: ANSIBLE_VARS_PLUGINS}] ini: - {key: vars_plugins, section: defaults} type: pathspec # TODO: unused? #DEFAULT_VAR_COMPRESSION_LEVEL: # default: 0 # description: 'TODO: write it' # env: [{name: ANSIBLE_VAR_COMPRESSION_LEVEL}] # ini: # - {key: var_compression_level, section: defaults} # type: integer # yaml: {key: defaults.var_compression_level} DEFAULT_VAULT_ID_MATCH: name: Force vault id match default: False description: 'If true, decrypting vaults with a vault id will only try the password from the matching vault-id' env: [{name: ANSIBLE_VAULT_ID_MATCH}] ini: - {key: vault_id_match, section: defaults} yaml: {key: defaults.vault_id_match} DEFAULT_VAULT_IDENTITY: name: Vault id label default: default description: 'The label to use for the default vault id label in cases where a vault id label is not provided' env: [{name: ANSIBLE_VAULT_IDENTITY}] ini: - {key: vault_identity, section: defaults} yaml: {key: defaults.vault_identity} DEFAULT_VAULT_ENCRYPT_IDENTITY: name: Vault id to use for encryption default: description: 'The vault_id to use for encrypting by default. If multiple vault_ids are provided, this specifies which to use for encryption. The --encrypt-vault-id cli option overrides the configured value.' env: [{name: ANSIBLE_VAULT_ENCRYPT_IDENTITY}] ini: - {key: vault_encrypt_identity, section: defaults} yaml: {key: defaults.vault_encrypt_identity} DEFAULT_VAULT_IDENTITY_LIST: name: Default vault ids default: [] description: 'A list of vault-ids to use by default. Equivalent to multiple --vault-id args. Vault-ids are tried in order.' env: [{name: ANSIBLE_VAULT_IDENTITY_LIST}] ini: - {key: vault_identity_list, section: defaults} type: list yaml: {key: defaults.vault_identity_list} DEFAULT_VAULT_PASSWORD_FILE: name: Vault password file default: ~ description: 'The vault password file to use. Equivalent to --vault-password-file or --vault-id' env: [{name: ANSIBLE_VAULT_PASSWORD_FILE}] ini: - {key: vault_password_file, section: defaults} type: path yaml: {key: defaults.vault_password_file} DEFAULT_VERBOSITY: name: Verbosity default: 0 description: Sets the default verbosity, equivalent to the number of ``-v`` passed in the command line. 
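# Illustrative equivalents (playbook name assumed); all of the following
# produce the same verbosity level:
#   ANSIBLE_VERBOSITY=2 ansible-playbook site.yml
#   ansible-playbook site.yml -vv
# or in ansible.cfg:
#   [defaults]
#   verbosity = 2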
env: [{name: ANSIBLE_VERBOSITY}] ini: - {key: verbosity, section: defaults} type: integer DEPRECATION_WARNINGS: name: Deprecation messages default: True description: "Toggle to control the showing of deprecation warnings" env: [{name: ANSIBLE_DEPRECATION_WARNINGS}] ini: - {key: deprecation_warnings, section: defaults} type: boolean DIFF_ALWAYS: name: Show differences default: False description: Configuration toggle to tell modules to show differences when in 'changed' status, equivalent to ``--diff``. env: [{name: ANSIBLE_DIFF_ALWAYS}] ini: - {key: always, section: diff} type: bool DIFF_CONTEXT: name: Difference context default: 3 description: How many lines of context to show when displaying the differences between files. env: [{name: ANSIBLE_DIFF_CONTEXT}] ini: - {key: context, section: diff} type: integer DISPLAY_ARGS_TO_STDOUT: name: Show task arguments default: False description: - "Normally ``ansible-playbook`` will print a header for each task that is run. These headers will contain the name: field from the task if you specified one. If you didn't then ``ansible-playbook`` uses the task's action to help you tell which task is presently running. Sometimes you run many of the same action and so you want more information about the task to differentiate it from others of the same action. If you set this variable to True in the config then ``ansible-playbook`` will also include the task's arguments in the header." - "This setting defaults to False because there is a chance that you have sensitive values in your parameters and you do not want those to be printed." - "If you set this to True you should be sure that you have secured your environment's stdout (no one can shoulder surf your screen and you aren't saving stdout to an insecure file) or made sure that all of your playbooks explicitly added the ``no_log: True`` parameter to tasks which have sensitive values. See How do I keep secret data in my playbook? for more information." env: [{name: ANSIBLE_DISPLAY_ARGS_TO_STDOUT}] ini: - {key: display_args_to_stdout, section: defaults} type: boolean version_added: "2.1" DISPLAY_SKIPPED_HOSTS: name: Show skipped results default: True description: "Toggle to control displaying skipped task/host entries in a task in the default callback" env: [{name: DISPLAY_SKIPPED_HOSTS}] ini: - {key: display_skipped_hosts, section: defaults} type: boolean ERROR_ON_MISSING_HANDLER: name: Missing handler error default: True description: "Toggle to allow missing handlers to become a warning instead of an error when notifying." env: [{name: ANSIBLE_ERROR_ON_MISSING_HANDLER}] ini: - {key: error_on_missing_handler, section: defaults} type: boolean GALAXY_IGNORE_CERTS: name: Galaxy validate certs default: False description: - If set to yes, ansible-galaxy will not validate TLS certificates. This can be useful for testing against a server with a self-signed certificate. env: [{name: ANSIBLE_GALAXY_IGNORE}] ini: - {key: ignore_certs, section: galaxy} type: boolean GALAXY_ROLE_SKELETON: name: Galaxy skeleton directory default: description: Role skeleton directory to use as a template for the ``init`` action in ``ansible-galaxy``, same as ``--role-skeleton``.
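# Illustrative use of the ``--role-skeleton`` option named above (role name
# and skeleton path assumed):
#   ansible-galaxy init my_role --role-skeleton=/path/to/skeleton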
env: [{name: ANSIBLE_GALAXY_ROLE_SKELETON}] ini: - {key: role_skeleton, section: galaxy} type: path GALAXY_ROLE_SKELETON_IGNORE: name: Galaxy skeleton ignore default: ["^.git$", "^.*/.git_keep$"] description: patterns of files to ignore inside a galaxy role skeleton directory env: [{name: ANSIBLE_GALAXY_ROLE_SKELETON_IGNORE}] ini: - {key: role_skeleton_ignore, section: galaxy} type: list # TODO: unused? #GALAXY_SCMS: # name: Galaxy SCMS # default: git, hg # description: Available galaxy source control management systems. # env: [{name: ANSIBLE_GALAXY_SCMS}] # ini: # - {key: scms, section: galaxy} # type: list GALAXY_SERVER: default: https://galaxy.ansible.com description: "URL to prepend when roles don't specify the full URI, assume they are referencing this server as the source." env: [{name: ANSIBLE_GALAXY_SERVER}] ini: - {key: server, section: galaxy} yaml: {key: galaxy.server} GALAXY_TOKEN: default: null description: "GitHub personal access token" env: [{name: ANSIBLE_GALAXY_TOKEN}] ini: - {key: token, section: galaxy} yaml: {key: galaxy.token} HOST_KEY_CHECKING: name: Check host keys default: True description: 'Set this to "False" if you want to avoid host key checking by the underlying tools Ansible uses to connect to the host' env: [{name: ANSIBLE_HOST_KEY_CHECKING}] ini: - {key: host_key_checking, section: defaults} type: boolean INVENTORY_ENABLED: name: Active Inventory plugins default: ['host_list', 'script', 'yaml', 'ini', 'auto'] description: List of enabled inventory plugins; it also determines the order in which they are used. env: [{name: ANSIBLE_INVENTORY_ENABLED}] ini: - {key: enable_plugins, section: inventory} type: list INVENTORY_EXPORT: name: Set ansible-inventory into export mode default: False description: Controls if ansible-inventory will accurately reflect Ansible's view into inventory or if it is optimized for exporting. env: [{name: ANSIBLE_INVENTORY_EXPORT}] ini: - {key: export, section: inventory} type: bool INVENTORY_IGNORE_EXTS: name: Inventory ignore extensions default: "{{(BLACKLIST_EXTS + ( '~', '.orig', '.ini', '.cfg', '.retry'))}}" description: List of extensions to ignore when using a directory as an inventory source env: [{name: ANSIBLE_INVENTORY_IGNORE}] ini: - {key: inventory_ignore_extensions, section: defaults} - {key: ignore_extensions, section: inventory} type: list INVENTORY_IGNORE_PATTERNS: name: Inventory ignore patterns default: [] description: List of patterns to ignore when using a directory as an inventory source env: [{name: ANSIBLE_INVENTORY_IGNORE_REGEX}] ini: - {key: inventory_ignore_patterns, section: defaults} - {key: ignore_patterns, section: inventory} type: list INVENTORY_UNPARSED_IS_FAILED: name: Unparsed Inventory failure default: False description: If 'true' unparsed inventory sources become fatal errors; they are warnings otherwise. env: [{name: ANSIBLE_INVENTORY_UNPARSED_FAILED}] ini: - {key: unparsed_is_failed, section: inventory} type: bool MAX_FILE_SIZE_FOR_DIFF: name: Diff maximum file size default: 104448 description: Maximum size of files to be considered for diff display env: [{name: ANSIBLE_MAX_DIFF_SIZE}] ini: - {key: max_diff_size, section: defaults} type: int MERGE_MULTIPLE_CLI_TAGS: name: Merge 'tags' options default: True description: - "This allows changing how multiple --tags and --skip-tags arguments are handled on the command line. In Ansible up to and including 2.3, specifying --tags more than once will only take the last value of --tags."
- "Setting this config value to True will mean that all of the --tags options will be merged together. The same holds true for --skip-tags." env: [{name: ANSIBLE_MERGE_MULTIPLE_CLI_TAGS}] ini: - {key: merge_multiple_cli_tags, section: defaults} type: bool version_added: "2.3" NETWORK_GROUP_MODULES: name: Network module families default: [eos, nxos, ios, iosxr, junos, enos, ce, vyos, sros, dellos9, dellos10, dellos6, asa, aruba, aireos, bigip, ironware, onyx] description: 'TODO: write it' env: [{name: NETWORK_GROUP_MODULES}] ini: - {key: network_group_modules, section: defaults} type: list yaml: {key: defaults.network_group_modules} INJECT_FACTS_AS_VARS: default: True description: - Facts are available inside the `ansible_facts` variable, this setting also pushes them as their own vars in the main namespace. - Unlike inside the `ansible_facts` dictionary, these will have an `ansible_` prefix. env: [{name: ANSIBLE_INJECT_FACT_VARS}] ini: - {key: inject_facts_as_vars, section: defaults} type: boolean version_added: "2.5" PARAMIKO_HOST_KEY_AUTO_ADD: # TODO: move to plugin default: False description: 'TODO: write it' env: [{name: ANSIBLE_PARAMIKO_HOST_KEY_AUTO_ADD}] ini: - {key: host_key_auto_add, section: paramiko_connection} type: boolean PARAMIKO_LOOK_FOR_KEYS: name: look for keys default: True description: 'TODO: write it' env: [{name: ANSIBLE_PARAMIKO_LOOK_FOR_KEYS}] ini: - {key: look_for_keys, section: paramiko_connection} type: boolean PERSISTENT_CONTROL_PATH_DIR: name: Persistence socket path default: ~/.ansible/pc description: Path to socket to be used by the connection persistence system. env: [{name: ANSIBLE_PERSISTENT_CONTROL_PATH_DIR}] ini: - {key: control_path_dir, section: persistent_connection} type: path PERSISTENT_CONNECT_TIMEOUT: name: Persistence timeout default: 30 description: This controls how long the persistent connection will remain idle before it is destroyed. env: [{name: ANSIBLE_PERSISTENT_CONNECT_TIMEOUT}] ini: - {key: connect_timeout, section: persistent_connection} type: integer PERSISTENT_CONNECT_RETRY_TIMEOUT: name: Persistence connection retry timeout default: 15 description: This contorls the retry timeout for presistent connection to connect to the local domain socket. env: [{name: ANSIBLE_PERSISTENT_CONNECT_RETRY_TIMEOUT}] ini: - {key: connect_retry_timeout, section: persistent_connection} type: integer PERSISTENT_COMMAND_TIMEOUT: name: Persistence command timeout default: 10 description: This controls the amount of time to wait for response from remote device before timing out presistent connection. env: [{name: ANSIBLE_PERSISTENT_COMMAND_TIMEOUT}] ini: - {key: command_timeout, section: persistent_connection} type: int PLAYBOOK_VARS_ROOT: name: playbook vars files root default: top version_added: "2.4.1" description: - This sets which playbook dirs will be used as a root to process vars plugins, which includes finding host_vars/group_vars - The ``top`` option follows the traditional behaviour of using the top playbook in the chain to find the root directory. - The ``bottom`` option follows the 2.4.0 behaviour of using the current playbook to find the root directory. - The ``all`` option examines from the first parent to the current playbook. 
env: [{name: ANSIBLE_PLAYBOOK_VARS_ROOT}] ini: - {key: playbook_vars_root, section: defaults} choices: [ top, bottom, all ] PLUGIN_FILTERS_CFG: name: Config file for limiting valid plugins default: null version_added: "2.5.0" description: - "A path to configuration for filtering which plugins installed on the system are allowed to be used." - "See :doc:`plugin_filtering_config` for details of the filter file's format." - " The default is /etc/ansible/plugin_filters.yml" ini: - key: plugin_filters_cfg section: default RETRY_FILES_ENABLED: name: Retry files default: True description: This controls whether a failed Ansible playbook should create a .retry file. env: [{name: ANSIBLE_RETRY_FILES_ENABLED}] ini: - {key: retry_files_enabled, section: defaults} type: bool RETRY_FILES_SAVE_PATH: name: Retry files path default: ~ description: This sets the path in which Ansible will save .retry files when a playbook fails and retry files are enabled. env: [{name: ANSIBLE_RETRY_FILES_SAVE_PATH}] ini: - {key: retry_files_save_path, section: defaults} type: path SHOW_CUSTOM_STATS: name: Display custom stats default: False description: 'This adds the custom stats set via the set_stats plugin to the default output' env: [{name: ANSIBLE_SHOW_CUSTOM_STATS}] ini: - {key: show_custom_stats, section: defaults} type: bool STRING_TYPE_FILTERS: name: Filters to preserve strings default: [string, to_json, to_nice_json, to_yaml, ppretty, json] description: - "This list of filters avoids 'type conversion' when templating variables" - Useful when you want to avoid conversion into lists or dictionaries for JSON strings, for example. env: [{name: ANSIBLE_STRING_TYPE_FILTERS}] ini: - {key: dont_type_filters, section: jinja2} type: list SYSTEM_WARNINGS: name: System warnings default: True description: - Allows disabling of warnings related to potential issues on the system running ansible itself (not on the managed hosts) - These may include warnings about 3rd party packages or other conditions that should be resolved if possible. env: [{name: ANSIBLE_SYSTEM_WARNINGS}] ini: - {key: system_warnings, section: defaults} type: boolean TAGS_RUN: name: Run Tags default: [] type: list description: default list of tags to run in your plays, Skip Tags has precedence. env: [{name: ANSIBLE_RUN_TAGS}] ini: - {key: run, section: tags} TAGS_SKIP: name: Skip Tags default: [] type: list description: default list of tags to skip in your plays, has precedence over Run Tags env: [{name: ANSIBLE_SKIP_TAGS}] ini: - {key: skip, section: tags} USE_PERSISTENT_CONNECTIONS: name: Persistence default: False description: Toggles the use of persistence for connections. env: [{name: ANSIBLE_USE_PERSISTENT_CONNECTIONS}] ini: - {key: use_persistent_connections, section: defaults} type: boolean VARIABLE_PRECEDENCE: name: Group variable precedence default: ['all_inventory', 'groups_inventory', 'all_plugins_inventory', 'all_plugins_play', 'groups_plugins_inventory', 'groups_plugins_play'] description: Allows to change the group variable precedence merge order. env: [{name: ANSIBLE_PRECEDENCE}] ini: - {key: precedence, section: defaults} type: list version_added: "2.4" YAML_FILENAME_EXTENSIONS: name: Valid YAML extensions default: [".yml", ".yaml", ".json"] description: - "Check all of these extensions when looking for 'variable' files which should be YAML or JSON or vaulted versions of these." - 'This affects vars_files, include_vars, inventory and vars plugins among others.' 
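# Illustrative effect of the default extension list (paths assumed): a
# group_vars source named 'all' may be found as any of
#   group_vars/all.yml, group_vars/all.yaml, group_vars/all.json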
env: - name: ANSIBLE_YAML_FILENAME_EXT ini: - section: defaults key: yaml_valid_extensions type: list ... ansible-2.5.1/lib/ansible/config/data.py0000644000000000000000000000276613265756155020037 0ustar rootroot00000000000000# Copyright: (c) 2017, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) __metaclass__ = type class ConfigData(object): def __init__(self): self._global_settings = {} self._plugins = {} def get_setting(self, name, plugin=None): setting = None if plugin is None: setting = self._global_settings.get(name) elif plugin.type in self._plugins and plugin.name in self._plugins[plugin.type]: setting = self._plugins[plugin.type][plugin.name].get(name) return setting def get_settings(self, plugin=None): settings = [] if plugin is None: settings = [self._global_settings[k] for k in self._global_settings] elif plugin.type in self._plugins and plugin.name in self._plugins[plugin.type]: settings = [self._plugins[plugin.type][plugin.name][k] for k in self._plugins[plugin.type][plugin.name]] return settings def update_setting(self, setting, plugin=None): if plugin is None: self._global_settings[setting.name] = setting else: if plugin.type not in self._plugins: self._plugins[plugin.type] = {} if plugin.name not in self._plugins[plugin.type]: self._plugins[plugin.type][plugin.name] = {} self._plugins[plugin.type][plugin.name][setting.name] = setting ansible-2.5.1/lib/ansible/config/manager.py0000644000000000000000000003566713265756155020546 0ustar rootroot00000000000000# Copyright: (c) 2017, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os import sys import tempfile from collections import namedtuple from yaml import load as yaml_load try: # use C version if possible for speedup from yaml import CSafeLoader as SafeLoader except ImportError: from yaml import SafeLoader from ansible.config.data import ConfigData from ansible.errors import AnsibleOptionsError, AnsibleError from ansible.module_utils.six import string_types from ansible.module_utils.six.moves import configparser from ansible.module_utils._text import to_text, to_bytes, to_native from ansible.module_utils.parsing.convert_bool import boolean from ansible.parsing.quoting import unquote from ansible.utils.path import unfrackpath from ansible.utils.path import makedirs_safe Plugin = namedtuple('Plugin', 'name type') Setting = namedtuple('Setting', 'name value origin type') # FIXME: see if we can unify in module_utils with similar function used by argspec def ensure_type(value, value_type, origin=None): ''' return a configuration variable with casting :arg value: The value to ensure correct typing of :kwarg value_type: The type of the value. This can be any of the following strings: :boolean: sets the value to a True or False value :integer: Sets the value to an integer or raises a ValueType error :float: Sets the value to a float or raises a ValueType error :list: Treats the value as a comma separated list. Split the value and return it as a python list. :none: Sets the value to None :path: Expands any environment variables and tilde's in the value. :tmp_path: Create a unique temporary directory inside of the directory specified by value and return its path. :pathlist: Treat the value as a typical PATH string. 
(On POSIX, this means colon separated strings.) Split the value and then expand each part for environment variables and tildes. ''' basedir = None if origin and os.path.isabs(origin) and os.path.exists(origin): basedir = origin if value_type: value_type = value_type.lower() if value_type in ('boolean', 'bool'): value = boolean(value, strict=False) elif value: if value_type in ('integer', 'int'): value = int(value) elif value_type == 'float': value = float(value) elif value_type == 'list': if isinstance(value, string_types): value = [x.strip() for x in value.split(',')] elif value_type == 'none': if value == "None": value = None elif value_type == 'path': value = resolve_path(value, basedir=basedir) elif value_type in ('tmp', 'temppath', 'tmppath'): value = resolve_path(value, basedir=basedir) if not os.path.exists(value): makedirs_safe(value, 0o700) prefix = 'ansible-local-%s' % os.getpid() value = tempfile.mkdtemp(prefix=prefix, dir=value) elif value_type == 'pathspec': if isinstance(value, string_types): value = value.split(os.pathsep) value = [resolve_path(x, basedir=basedir) for x in value] elif value_type == 'pathlist': if isinstance(value, string_types): value = value.split(',') value = [resolve_path(x, basedir=basedir) for x in value] # defaults to string types elif isinstance(value, string_types): value = unquote(value) return to_text(value, errors='surrogate_or_strict', nonstring='passthru') # FIXME: see if this can live in utils/path def resolve_path(path, basedir=None): ''' resolve relative or 'variable' paths ''' if '{{CWD}}' in path: # allow users to force CWD using 'magic' {{CWD}} path = path.replace('{{CWD}}', os.getcwd()) return unfrackpath(path, follow=False, basedir=basedir) # FIXME: generic file type? def get_config_type(cfile): ftype = None if cfile is not None: ext = os.path.splitext(cfile)[-1] if ext in ('.ini', '.cfg'): ftype = 'ini' elif ext in ('.yaml', '.yml'): ftype = 'yaml' else: raise AnsibleOptionsError("Unsupported configuration file extension for %s: %s" % (cfile, to_native(ext))) return ftype # FIXME: can move to module_utils for use for ini plugins also?
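# Editor's note (an illustrative sketch, not part of the original source):
# the casting rules of ensure_type() above are easiest to see by example.
# Assuming this module is importable as ansible.config.manager, a throwaway
# interpreter session would look roughly like this (Python 3 reprs shown;
# the 'path' result depends on the user's $HOME):
#
#     >>> from ansible.config.manager import ensure_type
#     >>> ensure_type('yes', 'bool')
#     True
#     >>> ensure_type(' a, b ,c ', 'list')
#     ['a', 'b', 'c']
#     >>> ensure_type('~/configs', 'path')
#     '/home/user/configs'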
def get_ini_config_value(p, entry): ''' returns the value of last ini entry found ''' value = None if p is not None: try: value = p.get(entry.get('section', 'defaults'), entry.get('key', ''), raw=True) except Exception: # FIXME: actually report issues here pass return value def find_ini_config_file(): ''' Load INI Config File order(first found is used): ENV, CWD, HOME, /etc/ansible ''' # FIXME: eventually deprecate ini configs path0 = os.getenv("ANSIBLE_CONFIG", None) if path0 is not None: path0 = unfrackpath(path0, follow=False) if os.path.isdir(path0): path0 += "/ansible.cfg" try: path1 = os.getcwd() + "/ansible.cfg" except OSError: path1 = None path2 = unfrackpath("~/.ansible.cfg", follow=False) path3 = "/etc/ansible/ansible.cfg" for path in [path0, path1, path2, path3]: if path is not None and os.path.exists(path): break else: path = None return path class ConfigManager(object): UNABLE = [] DEPRECATED = [] def __init__(self, conf_file=None, defs_file=None): self._base_defs = {} self._plugins = {} self._parser = None self._config_file = conf_file self.data = ConfigData() if defs_file is None: # Create configuration definitions from source b_defs_file = to_bytes('%s/base.yml' % os.path.dirname(__file__)) else: b_defs_file = to_bytes(defs_file) # consume definitions if os.path.exists(b_defs_file): with open(b_defs_file, 'rb') as config_def: self._base_defs = yaml_load(config_def, Loader=SafeLoader) else: raise AnsibleError("Missing base configuration definition file (bad install?): %s" % to_native(b_defs_file)) if self._config_file is None: # set config using ini self._config_file = find_ini_config_file() # consume configuration if self._config_file: if os.path.exists(self._config_file): # initialize parser and read config self._parse_config_file() # update constants self.update_config_data() def _parse_config_file(self, cfile=None): ''' return flat configuration settings from file(s) ''' # TODO: take list of files with merge/nomerge if cfile is None: cfile = self._config_file ftype = get_config_type(cfile) if cfile is not None: if ftype == 'ini': self._parser = configparser.ConfigParser() try: self._parser.read(cfile) except configparser.Error as e: raise AnsibleOptionsError("Error reading config file (%s): %s" % (cfile, to_native(e))) # FIXME: this should eventually handle yaml config files # elif ftype == 'yaml': # with open(cfile, 'rb') as config_stream: # self._parser = yaml.safe_load(config_stream) else: raise AnsibleOptionsError("Unsupported configuration file type: %s" % to_native(ftype)) def _find_yaml_config_files(self): ''' Load YAML Config Files in order, check merge flags, keep origin of settings''' pass def get_plugin_options(self, plugin_type, name, keys=None, variables=None): options = {} defs = self.get_configuration_definitions(plugin_type, name) for option in defs: options[option] = self.get_config_value(option, plugin_type=plugin_type, plugin_name=name, keys=keys, variables=variables) return options def get_plugin_vars(self, plugin_type, name): pvars = [] for pdef in self.get_configuration_definitions(plugin_type, name).values(): if 'vars' in pdef and pdef['vars']: for var_entry in pdef['vars']: pvars.append(var_entry['name']) return pvars def get_configuration_definitions(self, plugin_type=None, name=None): ''' just list the possible settings, either base or for specific plugins or plugin ''' ret = {} if plugin_type is None: ret = self._base_defs elif name is None: ret = self._plugins.get(plugin_type, {}) else: ret = self._plugins.get(plugin_type, {}).get(name, {}) 
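# (editor's note: self._plugins starts out empty and is filled lazily, one
# plugin at a time, by initialize_plugin_configuration_definitions() below)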
return ret def _loop_entries(self, container, entry_list): ''' repeat code for value entry assignment ''' value = None origin = None for entry in entry_list: name = entry.get('name') temp_value = container.get(name, None) if temp_value is not None: # only set if env var is defined value = temp_value origin = name # deal with deprecation of setting source, if used if 'deprecated' in entry: self.DEPRECATED.append((entry['name'], entry['deprecated'])) return value, origin def get_config_value(self, config, cfile=None, plugin_type=None, plugin_name=None, keys=None, variables=None): ''' wrapper ''' value, _drop = self.get_config_value_and_origin(config, cfile=cfile, plugin_type=plugin_type, plugin_name=plugin_name, keys=keys, variables=variables) return value def get_config_value_and_origin(self, config, cfile=None, plugin_type=None, plugin_name=None, keys=None, variables=None): ''' Given a config key figure out the actual value and report on the origin of the settings ''' if cfile is None: cfile = self._config_file else: self._parse_config_file(cfile) # Note: sources that are lists listed in low to high precedence (last one wins) value = None origin = None defs = {} if plugin_type is None: defs = self._base_defs elif plugin_name is None: defs = self._plugins[plugin_type] else: defs = self._plugins[plugin_type][plugin_name] if config in defs: # Use 'variable overrides' if present, highest precedence, but only present when querying running play if variables and defs[config].get('vars'): value, origin = self._loop_entries(variables, defs[config]['vars']) origin = 'var: %s' % origin # use playbook keywords if you have em if value is None and keys: value, origin = self._loop_entries(keys, defs[config]['keywords']) origin = 'keyword: %s' % origin # env vars are next precedence if value is None and defs[config].get('env'): value, origin = self._loop_entries(os.environ, defs[config]['env']) origin = 'env: %s' % origin # try config file entries next, if we have one if value is None and cfile is not None: ftype = get_config_type(cfile) if ftype and defs[config].get(ftype): if ftype == 'ini': # load from ini config try: # FIXME: generalize _loop_entries to allow for files also, most of this code is dupe for ini_entry in defs[config]['ini']: temp_value = get_ini_config_value(self._parser, ini_entry) if temp_value is not None: value = temp_value origin = cfile if 'deprecated' in ini_entry: self.DEPRECATED.append(('[%s]%s' % (ini_entry['section'], ini_entry['key']), ini_entry['deprecated'])) except Exception as e: sys.stderr.write("Error while loading ini config %s: %s" % (cfile, to_native(e))) elif ftype == 'yaml': # FIXME: implement, also , break down key from defs (. notation???) 
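# (editor's note: YAML configuration files are not actually parsed in this
# release; this branch only records the file as the origin on the next line
# and leaves the value unset)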
origin = cfile # set default if we got here w/o a value if value is None: value = defs[config].get('default') origin = 'default' # skip typing as this is a templated default that will be resolved later in constants, which has needed vars if plugin_type is None and isinstance(value, string_types) and (value.startswith('{{') and value.endswith('}}')): return value, origin # ensure correct type try: value = ensure_type(value, defs[config].get('type'), origin=origin) except Exception as e: self.UNABLE.append(config) # deal with deprecation of the setting if 'deprecated' in defs[config] and origin != 'default': self.DEPRECATED.append((config, defs[config].get('deprecated'))) else: raise AnsibleError('Requested option %s was not defined in configuration' % to_native(config)) return value, origin def initialize_plugin_configuration_definitions(self, plugin_type, name, defs): if plugin_type not in self._plugins: self._plugins[plugin_type] = {} self._plugins[plugin_type][name] = defs def update_config_data(self, defs=None, configfile=None): ''' really: update constants ''' if defs is None: defs = self._base_defs if configfile is None: configfile = self._config_file if not isinstance(defs, dict): raise AnsibleOptionsError("Invalid configuration definition type: %s for %s" % (type(defs), defs)) # update the constant for config file self.data.update_setting(Setting('CONFIG_FILE', configfile, '', 'string')) origin = None # env and config defs can have several entries, ordered in list from lowest to highest precedence for config in defs: if not isinstance(defs[config], dict): raise AnsibleOptionsError("Invalid configuration definition '%s': type is %s" % (to_native(config), type(defs[config]))) # get value and origin value, origin = self.get_config_value_and_origin(config, configfile) # set the constant self.data.update_setting(Setting(config, value, origin, defs[config].get('type', 'string'))) ansible-2.5.1/lib/ansible/errors/0000755000000000000000000000000013265756221016602 5ustar rootroot00000000000000ansible-2.5.1/lib/ansible/errors/__init__.py0000644000000000000000000002622013265756155020723 0ustar rootroot00000000000000# (c) 2012-2014, Michael DeHaan # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type from collections import Sequence import traceback import sys from ansible.errors.yaml_strings import ( YAML_COMMON_DICT_ERROR, YAML_COMMON_LEADING_TAB_ERROR, YAML_COMMON_PARTIALLY_QUOTED_LINE_ERROR, YAML_COMMON_UNBALANCED_QUOTES_ERROR, YAML_COMMON_UNQUOTED_COLON_ERROR, YAML_COMMON_UNQUOTED_VARIABLE_ERROR, YAML_POSITION_DETAILS, ) from ansible.module_utils._text import to_native, to_text class AnsibleError(Exception): ''' This is the base class for all errors raised from Ansible code, and can be instantiated with two optional parameters beyond the error message to control whether detailed information is displayed when the error occurred while parsing a data file of some kind. Usage: raise AnsibleError('some message here', obj=obj, show_content=True) Where "obj" is some subclass of ansible.parsing.yaml.objects.AnsibleBaseYAMLObject, which should be returned by the DataLoader() class. ''' def __init__(self, message="", obj=None, show_content=True, suppress_extended_error=False, orig_exc=None): super(AnsibleError, self).__init__(message) # we import this here to prevent an import loop problem, # since the objects code also imports ansible.errors from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject self._obj = obj self._show_content = show_content if obj and isinstance(obj, AnsibleBaseYAMLObject): extended_error = self._get_extended_error() if extended_error and not suppress_extended_error: self.message = '%s\n\n%s' % (to_native(message), to_native(extended_error)) else: self.message = '%s' % to_native(message) else: self.message = '%s' % to_native(message) if orig_exc: self.orig_exc = orig_exc self.tb = ''.join(traceback.format_tb(sys.exc_info()[2])) def __str__(self): return self.message def __repr__(self): return self.message def _get_error_lines_from_file(self, file_name, line_number): ''' Returns the line in the file which corresponds to the reported error location, as well as the line preceding it (if the error did not occur on the first line), to provide context to the error. ''' target_line = '' prev_line = '' with open(file_name, 'r') as f: lines = f.readlines() target_line = lines[line_number] if line_number > 0: prev_line = lines[line_number - 1] return (target_line, prev_line) def _get_extended_error(self): ''' Given an object reporting the location of the exception in a file, return detailed information regarding it including: * the line which caused the error as well as the one preceding it * causes and suggested remedies for common syntax errors If this error was created with show_content=False, the reporting of content is suppressed, as the file contents may be sensitive (i.e. vault data). ''' error_message = '' try: (src_file, line_number, col_number) = self._obj.ansible_pos error_message += YAML_POSITION_DETAILS % (src_file, line_number, col_number) if src_file not in ('<string>', '<unicode>') and self._show_content: (target_line, prev_line) = self._get_error_lines_from_file(src_file, line_number - 1) target_line = to_text(target_line) prev_line = to_text(prev_line) if target_line: stripped_line = target_line.replace(" ", "") arrow_line = (" " * (col_number - 1)) + "^ here" # header_line = ("=" * 73) error_message += "\nThe offending line appears to be:\n\n%s\n%s\n%s\n" % (prev_line.rstrip(), target_line.rstrip(), arrow_line) # TODO: There may be cases where there is a valid tab in a line that has other errors.
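# (editor's note: the tab check below is a standalone 'if', so its hint can
# be appended alongside one of the later hints; the unquoted-variable /
# dict / colon / quoting checks that follow form an if/elif chain, so at
# most one of those hints is added)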
if '\t' in target_line: error_message += YAML_COMMON_LEADING_TAB_ERROR # common error/remediation checking here: # check for unquoted vars starting lines if ('{{' in target_line and '}}' in target_line) and ('"{{' not in target_line or "'{{" not in target_line): error_message += YAML_COMMON_UNQUOTED_VARIABLE_ERROR # check for common dictionary mistakes elif ":{{" in stripped_line and "}}" in stripped_line: error_message += YAML_COMMON_DICT_ERROR # check for common unquoted colon mistakes elif (len(target_line) and len(target_line) > 1 and len(target_line) > col_number and target_line[col_number] == ":" and target_line.count(':') > 1): error_message += YAML_COMMON_UNQUOTED_COLON_ERROR # otherwise, check for some common quoting mistakes else: parts = target_line.split(":") if len(parts) > 1: middle = parts[1].strip() match = False unbalanced = False if middle.startswith("'") and not middle.endswith("'"): match = True elif middle.startswith('"') and not middle.endswith('"'): match = True if (len(middle) > 0 and middle[0] in ['"', "'"] and middle[-1] in ['"', "'"] and target_line.count("'") > 2 or target_line.count('"') > 2): unbalanced = True if match: error_message += YAML_COMMON_PARTIALLY_QUOTED_LINE_ERROR if unbalanced: error_message += YAML_COMMON_UNBALANCED_QUOTES_ERROR except (IOError, TypeError): error_message += '\n(could not open file to display line)' except IndexError: error_message += '\n(specified line no longer in file, maybe it changed?)' return error_message class AnsibleAssertionError(AnsibleError, AssertionError): '''Invalid assertion''' pass class AnsibleOptionsError(AnsibleError): ''' bad or incomplete options passed ''' pass class AnsibleParserError(AnsibleError): ''' something was detected early that is wrong about a playbook or data file ''' pass class AnsibleInternalError(AnsibleError): ''' internal safeguards tripped, something happened in the code that should never happen ''' pass class AnsibleRuntimeError(AnsibleError): ''' ansible had a problem while running a playbook ''' pass class AnsibleModuleError(AnsibleRuntimeError): ''' a module failed somehow ''' pass class AnsibleConnectionFailure(AnsibleRuntimeError): ''' the transport / connection_plugin had a fatal error ''' pass class AnsibleFilterError(AnsibleRuntimeError): ''' a templating failure ''' pass class AnsibleLookupError(AnsibleRuntimeError): ''' a lookup failure ''' pass class AnsibleCallbackError(AnsibleRuntimeError): ''' a callback failure ''' pass class AnsibleUndefinedVariable(AnsibleRuntimeError): ''' a templating failure ''' pass class AnsibleFileNotFound(AnsibleRuntimeError): ''' a file missing failure ''' def __init__(self, message="", obj=None, show_content=True, suppress_extended_error=False, orig_exc=None, paths=None, file_name=None): self.file_name = file_name self.paths = paths if self.file_name: if message: message += "\n" message += "Could not find or access '%s'" % to_text(self.file_name) if self.paths and isinstance(self.paths, Sequence): searched = to_text('\n\t'.join(self.paths)) if message: message += "\n" message += "Searched in:\n\t%s" % searched super(AnsibleFileNotFound, self).__init__(message=message, obj=obj, show_content=show_content, suppress_extended_error=suppress_extended_error, orig_exc=orig_exc) # These Exceptions are temporary, using them as flow control until we can get a better solution. # DO NOT USE as they will probably be removed soon. # We will port the action modules in our tree to use a context manager instead. 
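# Editor's note (hypothetical usage sketch, not part of the original file):
# an action plugin typically raises these flow-control exceptions from its
# run() method and lets a wrapper fold the pre-populated .result dict back
# into the task result, roughly like this:
#
#     def run(self, tmp=None, task_vars=None):
#         result = super(ActionModule, self).run(tmp, task_vars)
#         try:
#             if not self._task.args.get('src'):
#                 raise AnsibleActionFail('src is required')
#             # ... do the real work, possibly raising AnsibleActionSkip ...
#         except AnsibleAction as e:
#             result.update(e.result)  # carries failed/skipped plus msg
#         return result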
class AnsibleAction(AnsibleRuntimeError): ''' Base Exception for Action plugin flow control ''' def __init__(self, message="", obj=None, show_content=True, suppress_extended_error=False, orig_exc=None, result=None): super(AnsibleAction, self).__init__(message=message, obj=obj, show_content=show_content, suppress_extended_error=suppress_extended_error, orig_exc=orig_exc) if result is None: self.result = {} else: self.result = result class AnsibleActionSkip(AnsibleAction): ''' an action runtime skip''' def __init__(self, message="", obj=None, show_content=True, suppress_extended_error=False, orig_exc=None, result=None): super(AnsibleActionSkip, self).__init__(message=message, obj=obj, show_content=show_content, suppress_extended_error=suppress_extended_error, orig_exc=orig_exc, result=result) self.result.update({'skipped': True, 'msg': message}) class AnsibleActionFail(AnsibleAction): ''' an action runtime failure''' def __init__(self, message="", obj=None, show_content=True, suppress_extended_error=False, orig_exc=None, result=None): super(AnsibleActionFail, self).__init__(message=message, obj=obj, show_content=show_content, suppress_extended_error=suppress_extended_error, orig_exc=orig_exc, result=result) self.result.update({'failed': True, 'msg': message}) class _AnsibleActionDone(AnsibleAction): ''' an action runtime early exit''' pass ansible-2.5.1/lib/ansible/errors/yaml_strings.py0000644000000000000000000000735413265756155021706 0ustar rootroot00000000000000# (c) 2012-2014, Michael DeHaan # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type __all__ = [ 'YAML_SYNTAX_ERROR', 'YAML_POSITION_DETAILS', 'YAML_COMMON_DICT_ERROR', 'YAML_COMMON_UNQUOTED_VARIABLE_ERROR', 'YAML_COMMON_UNQUOTED_COLON_ERROR', 'YAML_COMMON_PARTIALLY_QUOTED_LINE_ERROR', 'YAML_COMMON_UNBALANCED_QUOTES_ERROR', ] YAML_SYNTAX_ERROR = """\ Syntax Error while loading YAML. %s""" YAML_POSITION_DETAILS = """\ The error appears to have been in '%s': line %s, column %s, but may be elsewhere in the file depending on the exact syntax problem. """ YAML_COMMON_DICT_ERROR = """\ This one looks easy to fix. YAML thought it was looking for the start of a hash/dictionary and was confused to see a second "{". Most likely this was meant to be an ansible template evaluation instead, so we have to give the parser a small hint that we wanted a string instead. The solution here is to just quote the entire value. For instance, if the original line was: app_path: {{ base_path }}/foo It should be written as: app_path: "{{ base_path }}/foo" """ YAML_COMMON_UNQUOTED_VARIABLE_ERROR = """\ We could be wrong, but this one looks like it might be an issue with missing quotes. Always quote template expression brackets when they start a value. 
For instance: with_items: - {{ foo }} Should be written as: with_items: - "{{ foo }}" """ YAML_COMMON_UNQUOTED_COLON_ERROR = """\ This one looks easy to fix. There seems to be an extra unquoted colon in the line and this is confusing the parser. It was only expecting to find one free colon. The solution is just add some quotes around the colon, or quote the entire line after the first colon. For instance, if the original line was: copy: src=file.txt dest=/path/filename:with_colon.txt It can be written as: copy: src=file.txt dest='/path/filename:with_colon.txt' Or: copy: 'src=file.txt dest=/path/filename:with_colon.txt' """ YAML_COMMON_PARTIALLY_QUOTED_LINE_ERROR = """\ This one looks easy to fix. It seems that there is a value started with a quote, and the YAML parser is expecting to see the line ended with the same kind of quote. For instance: when: "ok" in result.stdout Could be written as: when: '"ok" in result.stdout' Or equivalently: when: "'ok' in result.stdout" """ YAML_COMMON_UNBALANCED_QUOTES_ERROR = """\ We could be wrong, but this one looks like it might be an issue with unbalanced quotes. If starting a value with a quote, make sure the line ends with the same set of quotes. For instance this arbitrary example: foo: "bad" "wolf" Could be written as: foo: '"bad" "wolf"' """ YAML_COMMON_LEADING_TAB_ERROR = """\ There appears to be a tab character at the start of the line. YAML does not use tabs for formatting. Tabs should be replaced with spaces. For example: - name: update tooling vars: version: 1.2.3 # ^--- there is a tab there. Should be written as: - name: update tooling vars: version: 1.2.3 # ^--- all spaces here. """ ansible-2.5.1/lib/ansible/executor/0000755000000000000000000000000013265756221017124 5ustar rootroot00000000000000ansible-2.5.1/lib/ansible/executor/process/0000755000000000000000000000000013265756221020602 5ustar rootroot00000000000000ansible-2.5.1/lib/ansible/executor/process/__init__.py0000644000000000000000000000150113265756155022716 0ustar rootroot00000000000000# (c) 2012-2014, Michael DeHaan # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type ansible-2.5.1/lib/ansible/executor/process/worker.py0000644000000000000000000001413013265756155022472 0ustar rootroot00000000000000# (c) 2012-2014, Michael DeHaan # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. 
# # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type import multiprocessing import os import sys import traceback from jinja2.exceptions import TemplateNotFound HAS_PYCRYPTO_ATFORK = False try: from Crypto.Random import atfork HAS_PYCRYPTO_ATFORK = True except: # We only need to call atfork if pycrypto is used because it will need to # reinitialize its RNG. Since old paramiko could be using pycrypto, we # need to take charge of calling it. pass from ansible.errors import AnsibleConnectionFailure from ansible.executor.task_executor import TaskExecutor from ansible.executor.task_result import TaskResult from ansible.module_utils._text import to_text try: from __main__ import display except ImportError: from ansible.utils.display import Display display = Display() __all__ = ['WorkerProcess'] class WorkerProcess(multiprocessing.Process): ''' The worker thread class, which uses TaskExecutor to run tasks read from a job queue and pushes results into a results queue for reading later. ''' def __init__(self, rslt_q, task_vars, host, task, play_context, loader, variable_manager, shared_loader_obj): super(WorkerProcess, self).__init__() # takes a task queue manager as the sole param: self._rslt_q = rslt_q self._task_vars = task_vars self._host = host self._task = task self._play_context = play_context self._loader = loader self._variable_manager = variable_manager self._shared_loader_obj = shared_loader_obj if sys.stdin.isatty(): # dupe stdin, if we have one self._new_stdin = sys.stdin try: fileno = sys.stdin.fileno() if fileno is not None: try: self._new_stdin = os.fdopen(os.dup(fileno)) except OSError: # couldn't dupe stdin, most likely because it's # not a valid file descriptor, so we just rely on # using the one that was passed in pass except (AttributeError, ValueError): # couldn't get stdin's fileno, so we just carry on pass else: # set to /dev/null self._new_stdin = os.devnull def run(self): ''' Called when the process is started. Pushes the result onto the results queue. We also remove the host from the blocked hosts list, to signify that they are ready for their next task. 
''' # import cProfile, pstats, StringIO # pr = cProfile.Profile() # pr.enable() if HAS_PYCRYPTO_ATFORK: atfork() try: # execute the task and build a TaskResult from the result display.debug("running TaskExecutor() for %s/%s" % (self._host, self._task)) executor_result = TaskExecutor( self._host, self._task, self._task_vars, self._play_context, self._new_stdin, self._loader, self._shared_loader_obj, self._rslt_q ).run() display.debug("done running TaskExecutor() for %s/%s [%s]" % (self._host, self._task, self._task._uuid)) self._host.vars = dict() self._host.groups = [] task_result = TaskResult( self._host.name, self._task._uuid, executor_result, task_fields=self._task.dump_attrs(), ) # put the result on the result queue display.debug("sending task result for task %s" % self._task._uuid) self._rslt_q.put(task_result) display.debug("done sending task result for task %s" % self._task._uuid) except AnsibleConnectionFailure: self._host.vars = dict() self._host.groups = [] task_result = TaskResult( self._host.name, self._task._uuid, dict(unreachable=True), task_fields=self._task.dump_attrs(), ) self._rslt_q.put(task_result, block=False) except Exception as e: if not isinstance(e, (IOError, EOFError, KeyboardInterrupt, SystemExit)) or isinstance(e, TemplateNotFound): try: self._host.vars = dict() self._host.groups = [] task_result = TaskResult( self._host.name, self._task._uuid, dict(failed=True, exception=to_text(traceback.format_exc()), stdout=''), task_fields=self._task.dump_attrs(), ) self._rslt_q.put(task_result, block=False) except: display.debug(u"WORKER EXCEPTION: %s" % to_text(e)) display.debug(u"WORKER TRACEBACK: %s" % to_text(traceback.format_exc())) display.debug("WORKER PROCESS EXITING") # pr.disable() # s = StringIO.StringIO() # sortby = 'time' # ps = pstats.Stats(pr, stream=s).sort_stats(sortby) # ps.print_stats() # with open('worker_%06d.stats' % os.getpid(), 'w') as f: # f.write(s.getvalue()) ansible-2.5.1/lib/ansible/executor/__init__.py0000644000000000000000000000150113265756155021240 0ustar rootroot00000000000000# (c) 2012-2014, Michael DeHaan # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type ansible-2.5.1/lib/ansible/executor/action_write_locks.py0000644000000000000000000000340313265756155023366 0ustar rootroot00000000000000# (c) 2016 - Red Hat, Inc. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. 
# # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type from multiprocessing import Lock from ansible.module_utils.facts.system.pkg_mgr import PKG_MGRS if 'action_write_locks' not in globals(): # Do not initialize this more than once because it seems to bash # the existing one. multiprocessing must be reloading the module # when it forks? action_write_locks = dict() # Below is a Lock for use when we weren't expecting a named module. # It gets used when an action plugin directly invokes a module instead # of going through the strategies. Slightly less efficient as all # processes with unexpected module names will wait on this lock action_write_locks[None] = Lock() # These plugins are called directly by action plugins (not going through # a strategy). We precreate them here as an optimization mods = set(p['name'] for p in PKG_MGRS) mods.update(('copy', 'file', 'setup', 'slurp', 'stat')) for mod_name in mods: action_write_locks[mod_name] = Lock() ansible-2.5.1/lib/ansible/executor/module_common.py0000644000000000000000000012357713265756155022360 0ustar rootroot00000000000000# (c) 2013-2014, Michael DeHaan # (c) 2015 Toshio Kuratomi # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type import ast import base64 import datetime import imp import json import os import shlex import zipfile import random import re from io import BytesIO from ansible.release import __version__, __author__ from ansible import constants as C from ansible.errors import AnsibleError from ansible.module_utils._text import to_bytes, to_text, to_native from ansible.plugins.loader import module_utils_loader, ps_module_utils_loader from ansible.plugins.shell.powershell import async_watchdog, async_wrapper, become_wrapper, leaf_exec, exec_wrapper # Must import strategy and use write_locks from there # If we import write_locks directly then we end up binding a # variable to the object and then it never gets updated. 
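# Editor's illustrative note (not part of the original file): a plain
# 'from x import y' copies whatever object 'y' is bound to at import time,
# so if the source module ever rebinds that name the importer keeps the
# stale object. Importing the module and using attribute access always
# dereferences the current binding, e.g. (hypothetical modules):
#
#     # m.py:           cache = {}            ...later:  cache = new_dict
#     # stale_user.py:  from m import cache   # frozen at import time
#     # fresh_user.py:  import m; m.cache     # follows the rebinding
#
# which is why this file always goes through
# action_write_locks.action_write_locks.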
from ansible.executor import action_write_locks try: from __main__ import display except ImportError: from ansible.utils.display import Display display = Display() REPLACER = b"#<<INCLUDE_ANSIBLE_MODULE_COMMON>>" REPLACER_VERSION = b"\"<<ANSIBLE_VERSION>>\"" REPLACER_COMPLEX = b"\"<<INCLUDE_ANSIBLE_MODULE_COMPLEX_ARGS>>\"" REPLACER_WINDOWS = b"# POWERSHELL_COMMON" REPLACER_JSONARGS = b"<<INCLUDE_ANSIBLE_MODULE_JSON_ARGS>>" REPLACER_SELINUX = b"<<SELINUX_SPECIAL_FILESYSTEMS>>" # We could end up writing out parameters with unicode characters so we need to # specify an encoding for the python source file ENCODING_STRING = u'# -*- coding: utf-8 -*-' b_ENCODING_STRING = b'# -*- coding: utf-8 -*-' # module_common is relative to module_utils, so fix the path _MODULE_UTILS_PATH = os.path.join(os.path.dirname(__file__), '..', 'module_utils') # ****************************************************************************** ANSIBALLZ_TEMPLATE = u'''%(shebang)s %(coding)s ANSIBALLZ_WRAPPER = True # For test-module script to tell this is an ANSIBALLZ_WRAPPER # This code is part of Ansible, but is an independent component. # The code in this particular templatable string, and this templatable string # only, is BSD licensed. Modules which end up using this snippet, which is # dynamically combined together by Ansible still belong to the author of the # module, and they may assign their own license to the complete work. # # Copyright (c), James Cammarata, 2016 # Copyright (c), Toshio Kuratomi, 2016 # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import os import os.path import sys import __main__ # For some distros and python versions we pick up this script in the temporary # directory. This leads to problems when the ansible module masks a python # library that another import needs. We have not figured out what about the # specific distros and python versions causes this to behave differently. # # Tested distros: # Fedora23 with python3.4 Works # Ubuntu15.10 with python2.7 Works # Ubuntu15.10 with python3.4 Fails without this # Ubuntu16.04.1 with python3.5 Fails without this # To test on another platform: # * use the copy module (since this shadows the stdlib copy module) # * Turn off pipelining # * Make sure that the destination file does not exist # * ansible ubuntu16-test -m copy -a 'src=/etc/motd dest=/var/tmp/m' # This will traceback in shutil.
Looking at the complete traceback will show # that shutil is importing copy which finds the ansible module instead of the # stdlib module scriptdir = None try: scriptdir = os.path.dirname(os.path.realpath(__main__.__file__)) except (AttributeError, OSError): # Some platforms don't set __file__ when reading from stdin # OSX raises OSError if using abspath() in a directory we don't have # permission to read (realpath calls abspath) pass if scriptdir is not None: sys.path = [p for p in sys.path if p != scriptdir] import base64 import shutil import zipfile import tempfile import subprocess if sys.version_info < (3,): bytes = str PY3 = False else: unicode = str PY3 = True try: # Python-2.6+ from io import BytesIO as IOStream except ImportError: # Python < 2.6 from StringIO import StringIO as IOStream ZIPDATA = """%(zipdata)s""" def invoke_module(module, modlib_path, json_params): pythonpath = os.environ.get('PYTHONPATH') if pythonpath: os.environ['PYTHONPATH'] = ':'.join((modlib_path, pythonpath)) else: os.environ['PYTHONPATH'] = modlib_path p = subprocess.Popen([%(interpreter)s, module], env=os.environ, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) (stdout, stderr) = p.communicate(json_params) if not isinstance(stderr, (bytes, unicode)): stderr = stderr.read() if not isinstance(stdout, (bytes, unicode)): stdout = stdout.read() if PY3: sys.stderr.buffer.write(stderr) sys.stdout.buffer.write(stdout) else: sys.stderr.write(stderr) sys.stdout.write(stdout) return p.returncode def debug(command, zipped_mod, json_params): # The code here normally doesn't run. It's only used for debugging on the # remote machine. # # The subcommands in this function make it easier to debug ansiballz # modules. Here's the basic steps: # # Run ansible with the environment variable: ANSIBLE_KEEP_REMOTE_FILES=1 and -vvv # to save the module file remotely:: # $ ANSIBLE_KEEP_REMOTE_FILES=1 ansible host1 -m ping -a 'data=october' -vvv # # Part of the verbose output will tell you where on the remote machine the # module was written to:: # [...] # SSH: EXEC ssh -C -q -o ControlMaster=auto -o ControlPersist=60s -o KbdInteractiveAuthentication=no -o # PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey -o PasswordAuthentication=no -o ConnectTimeout=10 -o # ControlPath=/home/badger/.ansible/cp/ansible-ssh-%%h-%%p-%%r -tt rhel7 '/bin/sh -c '"'"'LANG=en_US.UTF-8 LC_ALL=en_US.UTF-8 # LC_MESSAGES=en_US.UTF-8 /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping'"'"'' # [...] # # Login to the remote machine and run the module file via from the previous # step with the explode subcommand to extract the module payload into # source files:: # $ ssh host1 # $ /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping explode # Module expanded into: # /home/badger/.ansible/tmp/ansible-tmp-1461173408.08-279692652635227/ansible # # You can now edit the source files to instrument the code or experiment with # different parameter values. 
When you're ready to run the code you've modified # (instead of the code from the actual zipped module), use the execute subcommand like this:: # $ /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping execute # Okay to use __file__ here because we're running from a kept file basedir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'debug_dir') args_path = os.path.join(basedir, 'args') script_path = os.path.join(basedir, 'ansible_module_%(ansible_module)s.py') if command == 'explode': # transform the ZIPDATA into an exploded directory of code and then # print the path to the code. This is an easy way for people to look # at the code on the remote machine for debugging it in that # environment z = zipfile.ZipFile(zipped_mod) for filename in z.namelist(): if filename.startswith('/'): raise Exception('Something wrong with this module zip file: should not contain absolute paths') dest_filename = os.path.join(basedir, filename) if dest_filename.endswith(os.path.sep) and not os.path.exists(dest_filename): os.makedirs(dest_filename) else: directory = os.path.dirname(dest_filename) if not os.path.exists(directory): os.makedirs(directory) f = open(dest_filename, 'wb') f.write(z.read(filename)) f.close() # write the args file f = open(args_path, 'wb') f.write(json_params) f.close() print('Module expanded into:') print('%%s' %% basedir) exitcode = 0 elif command == 'execute': # Execute the exploded code instead of executing the module from the # embedded ZIPDATA. This allows people to easily run their modified # code on the remote machine to see how changes will affect it. # This differs slightly from default Ansible execution of Python modules # as it passes the arguments to the module via a file instead of stdin. # Set pythonpath to the debug dir pythonpath = os.environ.get('PYTHONPATH') if pythonpath: os.environ['PYTHONPATH'] = ':'.join((basedir, pythonpath)) else: os.environ['PYTHONPATH'] = basedir p = subprocess.Popen([%(interpreter)s, script_path, args_path], env=os.environ, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) (stdout, stderr) = p.communicate() if not isinstance(stderr, (bytes, unicode)): stderr = stderr.read() if not isinstance(stdout, (bytes, unicode)): stdout = stdout.read() if PY3: sys.stderr.buffer.write(stderr) sys.stdout.buffer.write(stdout) else: sys.stderr.write(stderr) sys.stdout.write(stdout) return p.returncode elif command == 'excommunicate': # This attempts to run the module in-process (by importing a main # function and then calling it). It is not the way ansible generally # invokes the module so it won't work in every case. It is here to # aid certain debuggers which work better when the code doesn't change # from one process to another but there may be problems that occur # when using this that are only artifacts of how we're invoking here, # not actual bugs (as they don't affect the real way that we invoke # ansible modules) # stub the args and python path sys.argv = ['%(ansible_module)s', args_path] sys.path.insert(0, basedir) from ansible_module_%(ansible_module)s import main main() print('WARNING: Module returned to wrapper instead of exiting') sys.exit(1) else: print('WARNING: Unknown debug command. 
Doing nothing.') exitcode = 0 return exitcode if __name__ == '__main__': # # See comments in the debug() method for information on debugging # ANSIBALLZ_PARAMS = %(params)s if PY3: ANSIBALLZ_PARAMS = ANSIBALLZ_PARAMS.encode('utf-8') try: # There's a race condition with the controller removing the # remote_tmpdir and this module executing under async. So we cannot # store this in remote_tmpdir (use system tempdir instead) temp_path = tempfile.mkdtemp(prefix='ansible_') zipped_mod = os.path.join(temp_path, 'ansible_modlib.zip') modlib = open(zipped_mod, 'wb') modlib.write(base64.b64decode(ZIPDATA)) modlib.close() if len(sys.argv) == 2: exitcode = debug(sys.argv[1], zipped_mod, ANSIBALLZ_PARAMS) else: z = zipfile.ZipFile(zipped_mod, mode='r') module = os.path.join(temp_path, 'ansible_module_%(ansible_module)s.py') f = open(module, 'wb') f.write(z.read('ansible_module_%(ansible_module)s.py')) f.close() # When installed via setuptools (including python setup.py install), # ansible may be installed with an easy-install.pth file. That file # may load the system-wide install of ansible rather than the one in # the module. sitecustomize is the only way to override that setting. z = zipfile.ZipFile(zipped_mod, mode='a') # py3: zipped_mod will be text, py2: it's bytes. Need bytes at the end sitecustomize = u'import sys\\nsys.path.insert(0,"%%s")\\n' %% zipped_mod sitecustomize = sitecustomize.encode('utf-8') # Use a ZipInfo to work around zipfile limitation on hosts with # clocks set to a pre-1980 year (for instance, Raspberry Pi) zinfo = zipfile.ZipInfo() zinfo.filename = 'sitecustomize.py' zinfo.date_time = ( %(year)i, %(month)i, %(day)i, %(hour)i, %(minute)i, %(second)i) z.writestr(zinfo, sitecustomize) z.close() exitcode = invoke_module(module, zipped_mod, ANSIBALLZ_PARAMS) finally: try: shutil.rmtree(temp_path) except (NameError, OSError): # tempdir creation probably failed pass sys.exit(exitcode) ''' def _strip_comments(source): # Strip comments and blank lines from the wrapper buf = [] for line in source.splitlines(): l = line.strip() if not l or l.startswith(u'#'): continue buf.append(line) return u'\n'.join(buf) if C.DEFAULT_KEEP_REMOTE_FILES: # Keep comments when KEEP_REMOTE_FILES is set. That way users will see # the comments with some nice usage instructions ACTIVE_ANSIBALLZ_TEMPLATE = ANSIBALLZ_TEMPLATE else: # ANSIBALLZ_TEMPLATE stripped of comments for smaller over the wire size ACTIVE_ANSIBALLZ_TEMPLATE = _strip_comments(ANSIBALLZ_TEMPLATE) class ModuleDepFinder(ast.NodeVisitor): # Caveats: # This code currently does not handle: # * relative imports from py2.6+ from . import urls IMPORT_PREFIX_SIZE = len('ansible.module_utils.') def __init__(self, *args, **kwargs): """ Walk the ast tree for the python module. 
Save submodule[.submoduleN][.identifier] into self.submodules self.submodules will end up with tuples like: - ('basic',) - ('urls', 'fetch_url') - ('database', 'postgres') - ('database', 'postgres', 'quote') It's up to calling code to determine whether the final element of the dotted strings is a module name or something else (a function, class, or variable name) """ super(ModuleDepFinder, self).__init__(*args, **kwargs) self.submodules = set() def visit_Import(self, node): # import ansible.module_utils.MODLIB[.MODLIBn] [as asname] for alias in (a for a in node.names if a.name.startswith('ansible.module_utils.')): py_mod = alias.name[self.IMPORT_PREFIX_SIZE:] py_mod = tuple(py_mod.split('.')) self.submodules.add(py_mod) self.generic_visit(node) def visit_ImportFrom(self, node): # Special case: six is a special case because of its # import logic if node.names[0].name == '_six': self.submodules.add(('_six',)) elif node.module.startswith('ansible.module_utils'): where_from = node.module[self.IMPORT_PREFIX_SIZE:] if where_from: # from ansible.module_utils.MODULE1[.MODULEn] import IDENTIFIER [as asname] # from ansible.module_utils.MODULE1[.MODULEn] import MODULEn+1 [as asname] # from ansible.module_utils.MODULE1[.MODULEn] import MODULEn+1 [,IDENTIFIER] [as asname] py_mod = tuple(where_from.split('.')) for alias in node.names: self.submodules.add(py_mod + (alias.name,)) else: # from ansible.module_utils import MODLIB [,MODLIB2] [as asname] for alias in node.names: self.submodules.add((alias.name,)) self.generic_visit(node) def _slurp(path): if not os.path.exists(path): raise AnsibleError("imported module support code does not exist at %s" % os.path.abspath(path)) fd = open(path, 'rb') data = fd.read() fd.close() return data def _get_shebang(interpreter, task_vars, templar, args=tuple()): """ Note not stellar API: Returns None instead of always returning a shebang line. Doing it this way allows the caller to decide to use the shebang it read from the file rather than trust that we reformatted what they already have correctly. """ interpreter_config = u'ansible_%s_interpreter' % os.path.basename(interpreter).strip() if interpreter_config not in task_vars: return (None, interpreter) interpreter = templar.template(task_vars[interpreter_config].strip()) shebang = u'#!' + interpreter if args: shebang = shebang + u' ' + u' '.join(args) return (shebang, interpreter) def recursive_finder(name, data, py_module_names, py_module_cache, zf): """ Using ModuleDepFinder, make sure we have all of the module_utils files that the module and its module_utils files need. """ # Parse the module and find the imports of ansible.module_utils tree = ast.parse(data) finder = ModuleDepFinder() finder.visit(tree) # # Determine which of the imports we've found are modules (vs class, function,
# variable names) for packages # normalized_modules = set() # Loop through the imports that we've found to normalize them # Exclude paths that match with paths we've already processed # (Have to exclude them a second time once the paths are processed) module_utils_paths = [p for p in module_utils_loader._get_paths(subdirs=False) if os.path.isdir(p)] module_utils_paths.append(_MODULE_UTILS_PATH) for py_module_name in finder.submodules.difference(py_module_names): module_info = None if py_module_name[0] == 'six': # Special case the python six library because it messes up the # import process in an incompatible way module_info = imp.find_module('six', module_utils_paths) py_module_name = ('six',) idx = 0 elif py_module_name[0] == '_six': # Special case the python six library because it messes up the # import process in an incompatible way module_info = imp.find_module('_six', [os.path.join(p, 'six') for p in module_utils_paths]) py_module_name = ('six', '_six') idx = 0 else: # Check whether either the last or the second to last identifier is # a module name for idx in (1, 2): if len(py_module_name) < idx: break try: module_info = imp.find_module(py_module_name[-idx], [os.path.join(p, *py_module_name[:-idx]) for p in module_utils_paths]) break except ImportError: continue # Could not find the module. Construct a helpful error message. if module_info is None: msg = ['Could not find imported module support code for %s. Looked for' % (name,)] if idx == 2: msg.append('either %s.py or %s.py' % (py_module_name[-1], py_module_name[-2])) else: msg.append(py_module_name[-1]) raise AnsibleError(' '.join(msg)) # Found a byte compiled file rather than source. We cannot send byte # compiled over the wire as the python version might be different. # imp.find_module seems to prefer to return source packages so we just # error out if imp.find_module returns byte compiled files (This is # fragile as it depends on undocumented imp.find_module behaviour) if module_info[2][2] not in (imp.PY_SOURCE, imp.PKG_DIRECTORY): msg = ['Could not find python source for imported module support code for %s. 
Looked for' % name] if idx == 2: msg.append('either %s.py or %s.py' % (py_module_name[-1], py_module_name[-2])) else: msg.append(py_module_name[-1]) raise AnsibleError(' '.join(msg)) if idx == 2: # We've determined that the last portion was an identifier and # thus, not part of the module name py_module_name = py_module_name[:-1] # If not already processed then we've got work to do if py_module_name not in py_module_names: # If not in the cache, then read the file into the cache # We already have a file handle for the module open so it makes # sense to read it now if py_module_name not in py_module_cache: if module_info[2][2] == imp.PKG_DIRECTORY: # Read the __init__.py instead of the module file as this is # a python package normalized_name = py_module_name + ('__init__',) normalized_path = os.path.join(os.path.join(module_info[1], '__init__.py')) normalized_data = _slurp(normalized_path) else: normalized_name = py_module_name normalized_path = module_info[1] normalized_data = module_info[0].read() module_info[0].close() py_module_cache[normalized_name] = (normalized_data, normalized_path) normalized_modules.add(normalized_name) # Make sure that all the packages that this module is a part of # are also added for i in range(1, len(py_module_name)): py_pkg_name = py_module_name[:-i] + ('__init__',) if py_pkg_name not in py_module_names: pkg_dir_info = imp.find_module(py_pkg_name[-1], [os.path.join(p, *py_pkg_name[:-1]) for p in module_utils_paths]) normalized_modules.add(py_pkg_name) py_module_cache[py_pkg_name] = (_slurp(pkg_dir_info[1]), pkg_dir_info[1]) # # iterate through all of the ansible.module_utils* imports that we haven't # already checked for new imports # # set of modules that we haven't added to the zipfile unprocessed_py_module_names = normalized_modules.difference(py_module_names) for py_module_name in unprocessed_py_module_names: py_module_path = os.path.join(*py_module_name) py_module_file_name = '%s.py' % py_module_path zf.writestr(os.path.join("ansible/module_utils", py_module_file_name), py_module_cache[py_module_name][0]) display.vvvvv("Using module_utils file %s" % py_module_cache[py_module_name][1]) # Add the names of the files we're scheduling to examine in the loop to # py_module_names so that we don't re-examine them in the next pass # through recursive_finder() py_module_names.update(unprocessed_py_module_names) for py_module_file in unprocessed_py_module_names: recursive_finder(py_module_file, py_module_cache[py_module_file][0], py_module_names, py_module_cache, zf) # Save memory; the file won't have to be read again for this ansible module. del py_module_cache[py_module_file] def _is_binary(b_module_data): textchars = bytearray(set([7, 8, 9, 10, 12, 13, 27]) | set(range(0x20, 0x100)) - set([0x7f])) start = b_module_data[:1024] return bool(start.translate(None, textchars)) def _find_module_utils(module_name, b_module_data, module_path, module_args, task_vars, templar, module_compression, async_timeout, become, become_method, become_user, become_password, become_flags, environment): """ Given the source of the module, convert it to a Jinja2 template to insert module code and return whether it's a new or old style module. """ module_substyle = module_style = 'old' # module_style is something important to calling code (ActionBase). It # determines how arguments are formatted (json vs k=v) and whether # a separate arguments file needs to be sent over the wire. # module_substyle is extra information that's useful internally. 
It tells # us what we have to look to substitute in the module files and whether # we're using module replacer or ansiballz to format the module itself. if _is_binary(b_module_data): module_substyle = module_style = 'binary' elif REPLACER in b_module_data: # Do REPLACER before from ansible.module_utils because we need make sure # we substitute "from ansible.module_utils basic" for REPLACER module_style = 'new' module_substyle = 'python' b_module_data = b_module_data.replace(REPLACER, b'from ansible.module_utils.basic import *') elif b'from ansible.module_utils.' in b_module_data: module_style = 'new' module_substyle = 'python' elif REPLACER_WINDOWS in b_module_data: module_style = 'new' module_substyle = 'powershell' b_module_data = b_module_data.replace(REPLACER_WINDOWS, b'#Requires -Module Ansible.ModuleUtils.Legacy') elif re.search(b'#Requires -Module', b_module_data, re.IGNORECASE) \ or re.search(b'#Requires -Version', b_module_data, re.IGNORECASE)\ or re.search(b'#AnsibleRequires -OSVersion', b_module_data, re.IGNORECASE): module_style = 'new' module_substyle = 'powershell' elif REPLACER_JSONARGS in b_module_data: module_style = 'new' module_substyle = 'jsonargs' elif b'WANT_JSON' in b_module_data: module_substyle = module_style = 'non_native_want_json' shebang = None # Neither old-style, non_native_want_json nor binary modules should be modified # except for the shebang line (Done by modify_module) if module_style in ('old', 'non_native_want_json', 'binary'): return b_module_data, module_style, shebang output = BytesIO() py_module_names = set() if module_substyle == 'python': params = dict(ANSIBLE_MODULE_ARGS=module_args,) python_repred_params = repr(json.dumps(params)) try: compression_method = getattr(zipfile, module_compression) except AttributeError: display.warning(u'Bad module compression string specified: %s. Using ZIP_STORED (no compression)' % module_compression) compression_method = zipfile.ZIP_STORED lookup_path = os.path.join(C.DEFAULT_LOCAL_TMP, 'ansiballz_cache') cached_module_filename = os.path.join(lookup_path, "%s-%s" % (module_name, module_compression)) zipdata = None # Optimization -- don't lock if the module has already been cached if os.path.exists(cached_module_filename): display.debug('ANSIBALLZ: using cached module: %s' % cached_module_filename) zipdata = open(cached_module_filename, 'rb').read() else: if module_name in action_write_locks.action_write_locks: display.debug('ANSIBALLZ: Using lock for %s' % module_name) lock = action_write_locks.action_write_locks[module_name] else: # If the action plugin directly invokes the module (instead of # going through a strategy) then we don't have a cross-process # Lock specifically for this module. 
Use the "unexpected # module" lock instead display.debug('ANSIBALLZ: Using generic lock for %s' % module_name) lock = action_write_locks.action_write_locks[None] display.debug('ANSIBALLZ: Acquiring lock') with lock: display.debug('ANSIBALLZ: Lock acquired: %s' % id(lock)) # Check that no other process has created this while we were # waiting for the lock if not os.path.exists(cached_module_filename): display.debug('ANSIBALLZ: Creating module') # Create the module zip data zipoutput = BytesIO() zf = zipfile.ZipFile(zipoutput, mode='w', compression=compression_method) # Note: If we need to import from release.py first, # remember to catch all exceptions: https://github.com/ansible/ansible/issues/16523 zf.writestr('ansible/__init__.py', b'from pkgutil import extend_path\n__path__=extend_path(__path__,__name__)\n__version__="' + to_bytes(__version__) + b'"\n__author__="' + to_bytes(__author__) + b'"\n') zf.writestr('ansible/module_utils/__init__.py', b'from pkgutil import extend_path\n__path__=extend_path(__path__,__name__)\n') zf.writestr('ansible_module_%s.py' % module_name, b_module_data) py_module_cache = {('__init__',): (b'', '[builtin]')} recursive_finder(module_name, b_module_data, py_module_names, py_module_cache, zf) zf.close() zipdata = base64.b64encode(zipoutput.getvalue()) # Write the assembled module to a temp file (write to temp # so that no one looking for the file reads a partially # written file) if not os.path.exists(lookup_path): # Note -- if we have a global function to setup, that would # be a better place to run this os.makedirs(lookup_path) display.debug('ANSIBALLZ: Writing module') with open(cached_module_filename + '-part', 'wb') as f: f.write(zipdata) # Rename the file into its final position in the cache so # future users of this module can read it off the # filesystem instead of constructing from scratch. display.debug('ANSIBALLZ: Renaming module') os.rename(cached_module_filename + '-part', cached_module_filename) display.debug('ANSIBALLZ: Done creating module') if zipdata is None: display.debug('ANSIBALLZ: Reading module after lock') # Another process wrote the file while we were waiting for # the write lock. Go ahead and read the data from disk # instead of re-creating it. try: zipdata = open(cached_module_filename, 'rb').read() except IOError: raise AnsibleError('A different worker process failed to create module file. ' 'Look at traceback for that process for debugging information.') zipdata = to_text(zipdata, errors='surrogate_or_strict') shebang, interpreter = _get_shebang(u'/usr/bin/python', task_vars, templar) if shebang is None: shebang = u'#!/usr/bin/python' # Enclose the parts of the interpreter in quotes because we're # substituting it into the template as a Python string interpreter_parts = interpreter.split(u' ') interpreter = u"'{0}'".format(u"', '".join(interpreter_parts)) now = datetime.datetime.utcnow() output.write(to_bytes(ACTIVE_ANSIBALLZ_TEMPLATE % dict( zipdata=zipdata, ansible_module=module_name, params=python_repred_params, shebang=shebang, interpreter=interpreter, coding=ENCODING_STRING, year=now.year, month=now.month, day=now.day, hour=now.hour, minute=now.minute, second=now.second, ))) b_module_data = output.getvalue() elif module_substyle == 'powershell': # Powershell/winrm don't actually make use of shebang so we can # safely set this here. 
    elif module_substyle == 'powershell':
        # Powershell/winrm don't actually make use of shebang so we can
        # safely set this here.  If we let the fallback code handle this
        # it can fail in the presence of the UTF8 BOM commonly added by
        # Windows text editors
        shebang = u'#!powershell'

        exec_manifest = dict(
            module_entry=to_text(base64.b64encode(b_module_data)),
            powershell_modules=dict(),
            module_args=module_args,
            actions=['exec'],
            environment=environment
        )

        exec_manifest['exec'] = to_text(base64.b64encode(to_bytes(leaf_exec)))

        if async_timeout > 0:
            exec_manifest["actions"].insert(0, 'async_watchdog')
            exec_manifest["async_watchdog"] = to_text(base64.b64encode(to_bytes(async_watchdog)))
            exec_manifest["actions"].insert(0, 'async_wrapper')
            exec_manifest["async_wrapper"] = to_text(base64.b64encode(to_bytes(async_wrapper)))
            exec_manifest["async_jid"] = str(random.randint(0, 999999999999))
            exec_manifest["async_timeout_sec"] = async_timeout

        if become and become_method == 'runas':
            exec_manifest["actions"].insert(0, 'become')
            exec_manifest["become_user"] = become_user
            exec_manifest["become_password"] = become_password
            exec_manifest['become_flags'] = become_flags
            exec_manifest["become"] = to_text(base64.b64encode(to_bytes(become_wrapper)))

        lines = b_module_data.split(b'\n')
        module_names = set()
        become_required = False
        min_os_version = None
        min_ps_version = None

        requires_module_list = re.compile(to_bytes(r'(?i)^#\s*requires\s+\-module(?:s?)\s*(Ansible\.ModuleUtils\..+)'))
        requires_ps_version = re.compile(to_bytes(r'(?i)^#requires\s+\-version\s+([0-9]+(\.[0-9]+){0,3})$'))
        requires_os_version = re.compile(to_bytes(r'(?i)^#ansiblerequires\s+\-osversion\s+([0-9]+(\.[0-9]+){0,3})$'))
        requires_become = re.compile(to_bytes(r'(?i)^#ansiblerequires\s+\-become$'))

        for line in lines:
            module_util_line_match = requires_module_list.match(line)
            if module_util_line_match:
                module_names.add(module_util_line_match.group(1))

            requires_ps_version_match = requires_ps_version.match(line)
            if requires_ps_version_match:
                min_ps_version = to_text(requires_ps_version_match.group(1))
                # Powershell cannot cast a string of "1" to version; it must
                # have at least the major.minor for it to work, so we append 0
                if requires_ps_version_match.group(2) is None:
                    min_ps_version = "%s.0" % min_ps_version

            requires_os_version_match = requires_os_version.match(line)
            if requires_os_version_match:
                min_os_version = to_text(requires_os_version_match.group(1))
                if requires_os_version_match.group(2) is None:
                    min_os_version = "%s.0" % min_os_version

            requires_become_match = requires_become.match(line)
            if requires_become_match:
                become_required = True
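        # Added commentary (not part of the original source): the regexes above
        # match directive lines a PowerShell module may carry, e.g.
        #
        #     #Requires -Module Ansible.ModuleUtils.Legacy
        #     #Requires -Version 3.0
        #     #AnsibleRequires -OSVersion 6.2
        #     #AnsibleRequires -Become
        #
        # Matched module names are resolved to .psm1 files below and embedded
        # (base64-encoded) in the exec_manifest shipped to the host.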
        for m in set(module_names):
            m = to_text(m).rstrip()  # tolerate windows line endings
            mu_path = ps_module_utils_loader.find_plugin(m, ".psm1")
            if not mu_path:
                raise AnsibleError('Could not find imported module support code for \'%s\'.' % m)
            exec_manifest["powershell_modules"][m] = to_text(
                base64.b64encode(
                    to_bytes(
                        _slurp(mu_path)
                    )
                )
            )

        exec_manifest['min_ps_version'] = min_ps_version
        exec_manifest['min_os_version'] = min_os_version
        if become_required and 'become' not in exec_manifest["actions"]:
            exec_manifest["actions"].insert(0, 'become')
            exec_manifest["become_user"] = "SYSTEM"
            exec_manifest["become_password"] = None
            exec_manifest['become_flags'] = None
            exec_manifest["become"] = to_text(base64.b64encode(to_bytes(become_wrapper)))

        # FUTURE: smuggle this back as a dict instead of serializing here; the connection plugin may need to modify it
        module_json = json.dumps(exec_manifest)

        b_module_data = exec_wrapper.replace(b"$json_raw = ''", b"$json_raw = @'\r\n%s\r\n'@" % to_bytes(module_json))

    elif module_substyle == 'jsonargs':
        module_args_json = to_bytes(json.dumps(module_args))

        # these strings could be included in a third-party module but
        # officially they were included in the 'basic' snippet for new-style
        # python modules (which has been replaced with something else in
        # ansiballz).  If we remove them from jsonargs-style module replacer
        # then we can remove them everywhere.
        python_repred_args = to_bytes(repr(module_args_json))
        b_module_data = b_module_data.replace(REPLACER_VERSION, to_bytes(repr(__version__)))
        b_module_data = b_module_data.replace(REPLACER_COMPLEX, python_repred_args)
        b_module_data = b_module_data.replace(REPLACER_SELINUX, to_bytes(','.join(C.DEFAULT_SELINUX_SPECIAL_FS)))

        # The main event -- substitute the JSON args string into the module
        b_module_data = b_module_data.replace(REPLACER_JSONARGS, module_args_json)

        facility = b'syslog.' + to_bytes(task_vars.get('ansible_syslog_facility', C.DEFAULT_SYSLOG_FACILITY), errors='surrogate_or_strict')
        b_module_data = b_module_data.replace(b'syslog.LOG_USER', facility)

    return (b_module_data, module_style, shebang)
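# Added commentary (not part of the original source): every branch above
# funnels into the same return contract -- a (b_module_data, module_style,
# shebang) triple.  module_style drives how ActionBase ships arguments
# ('new' -> embedded JSON, 'old' -> k=v string, 'non_native_want_json' ->
# separate JSON file, 'binary' -> payload untouched), while shebang only
# matters for the interpreted styles.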
""" task_vars = {} if task_vars is None else task_vars environment = {} if environment is None else environment with open(module_path, 'rb') as f: # read in the module source b_module_data = f.read() (b_module_data, module_style, shebang) = _find_module_utils(module_name, b_module_data, module_path, module_args, task_vars, templar, module_compression, async_timeout=async_timeout, become=become, become_method=become_method, become_user=become_user, become_password=become_password, become_flags=become_flags, environment=environment) if module_style == 'binary': return (b_module_data, module_style, to_text(shebang, nonstring='passthru')) elif shebang is None: b_lines = b_module_data.split(b"\n", 1) if b_lines[0].startswith(b"#!"): b_shebang = b_lines[0].strip() # shlex.split on python-2.6 needs bytes. On python-3.x it needs text args = shlex.split(to_native(b_shebang[2:], errors='surrogate_or_strict')) # _get_shebang() takes text strings args = [to_text(a, errors='surrogate_or_strict') for a in args] interpreter = args[0] b_new_shebang = to_bytes(_get_shebang(interpreter, task_vars, templar, args[1:])[0], errors='surrogate_or_strict', nonstring='passthru') if b_new_shebang: b_lines[0] = b_shebang = b_new_shebang if os.path.basename(interpreter).startswith(u'python'): b_lines.insert(1, b_ENCODING_STRING) shebang = to_text(b_shebang, nonstring='passthru', errors='surrogate_or_strict') else: # No shebang, assume a binary module? pass b_module_data = b"\n".join(b_lines) return (b_module_data, module_style, shebang) ansible-2.5.1/lib/ansible/executor/play_iterator.py0000644000000000000000000006343513265756155022375 0ustar rootroot00000000000000# (c) 2012-2014, Michael DeHaan # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import fnmatch

from ansible import constants as C
from ansible.module_utils.six import iteritems
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.playbook.block import Block
from ansible.playbook.task import Task

try:
    from __main__ import display
except ImportError:
    from ansible.utils.display import Display
    display = Display()

__all__ = ['PlayIterator']


class HostState:
    def __init__(self, blocks):
        self._blocks = blocks[:]

        self.cur_block = 0
        self.cur_regular_task = 0
        self.cur_rescue_task = 0
        self.cur_always_task = 0
        self.cur_dep_chain = None
        self.run_state = PlayIterator.ITERATING_SETUP
        self.fail_state = PlayIterator.FAILED_NONE
        self.pending_setup = False
        self.tasks_child_state = None
        self.rescue_child_state = None
        self.always_child_state = None
        self.did_rescue = False
        self.did_start_at_task = False

    def __repr__(self):
        return "HostState(%r)" % self._blocks

    def __str__(self):
        def _run_state_to_string(n):
            states = ["ITERATING_SETUP", "ITERATING_TASKS", "ITERATING_RESCUE", "ITERATING_ALWAYS", "ITERATING_COMPLETE"]
            try:
                return states[n]
            except IndexError:
                return "UNKNOWN STATE"

        def _failed_state_to_string(n):
            states = {1: "FAILED_SETUP", 2: "FAILED_TASKS", 4: "FAILED_RESCUE", 8: "FAILED_ALWAYS"}
            if n == 0:
                return "FAILED_NONE"
            else:
                ret = []
                for i in (1, 2, 4, 8):
                    if n & i:
                        ret.append(states[i])
                return "|".join(ret)

        return ("HOST STATE: block=%d, task=%d, rescue=%d, always=%d, run_state=%s, fail_state=%s, pending_setup=%s, tasks child state? (%s), "
                "rescue child state? (%s), always child state? (%s), did rescue? %s, did start at task? %s" % (
                    self.cur_block,
                    self.cur_regular_task,
                    self.cur_rescue_task,
                    self.cur_always_task,
                    _run_state_to_string(self.run_state),
                    _failed_state_to_string(self.fail_state),
                    self.pending_setup,
                    self.tasks_child_state,
                    self.rescue_child_state,
                    self.always_child_state,
                    self.did_rescue,
                    self.did_start_at_task,
                ))

    def __eq__(self, other):
        if not isinstance(other, HostState):
            return False

        for attr in ('_blocks', 'cur_block', 'cur_regular_task', 'cur_rescue_task', 'cur_always_task',
                     'run_state', 'fail_state', 'pending_setup', 'cur_dep_chain',
                     'tasks_child_state', 'rescue_child_state', 'always_child_state'):
            if getattr(self, attr) != getattr(other, attr):
                return False

        return True

    def get_current_block(self):
        return self._blocks[self.cur_block]

    def copy(self):
        new_state = HostState(self._blocks)
        new_state.cur_block = self.cur_block
        new_state.cur_regular_task = self.cur_regular_task
        new_state.cur_rescue_task = self.cur_rescue_task
        new_state.cur_always_task = self.cur_always_task
        new_state.run_state = self.run_state
        new_state.fail_state = self.fail_state
        new_state.pending_setup = self.pending_setup
        new_state.did_rescue = self.did_rescue
        new_state.did_start_at_task = self.did_start_at_task
        if self.cur_dep_chain is not None:
            new_state.cur_dep_chain = self.cur_dep_chain[:]
        if self.tasks_child_state is not None:
            new_state.tasks_child_state = self.tasks_child_state.copy()
        if self.rescue_child_state is not None:
            new_state.rescue_child_state = self.rescue_child_state.copy()
        if self.always_child_state is not None:
            new_state.always_child_state = self.always_child_state.copy()
        return new_state
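# Illustrative note (added commentary, not part of the original source):
# fail_state is a bit field, so a host that failed in both the task and
# rescue sections carries
#
#     fail_state = PlayIterator.FAILED_TASKS | PlayIterator.FAILED_RESCUE  # == 6
#
# which _failed_state_to_string() above renders as "FAILED_TASKS|FAILED_RESCUE".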
class PlayIterator:

    # the primary running states for the play iteration
    ITERATING_SETUP = 0
    ITERATING_TASKS = 1
    ITERATING_RESCUE = 2
    ITERATING_ALWAYS = 3
    ITERATING_COMPLETE = 4

    # the failure states for the play iteration, which are powers
    # of 2 as they may be or'ed together in certain circumstances
    FAILED_NONE = 0
    FAILED_SETUP = 1
    FAILED_TASKS = 2
    FAILED_RESCUE = 4
    FAILED_ALWAYS = 8

    def __init__(self, inventory, play, play_context, variable_manager, all_vars, start_at_done=False):
        self._play = play
        self._blocks = []
        self._variable_manager = variable_manager

        # Default options to gather
        gather_subset = play_context.gather_subset
        gather_timeout = play_context.gather_timeout
        fact_path = play_context.fact_path

        # Retrieve subset to gather
        if self._play.gather_subset is not None:
            gather_subset = self._play.gather_subset
        # Retrieve timeout for gather
        if self._play.gather_timeout is not None:
            gather_timeout = self._play.gather_timeout
        # Retrieve fact_path
        if self._play.fact_path is not None:
            fact_path = self._play.fact_path

        setup_block = Block(play=self._play)
        setup_task = Task(block=setup_block)
        setup_task.action = 'setup'
        setup_task.name = 'Gathering Facts'
        setup_task.tags = ['always']
        setup_task.args = {
            'gather_subset': gather_subset,
        }
        if gather_timeout:
            setup_task.args['gather_timeout'] = gather_timeout
        if fact_path:
            setup_task.args['fact_path'] = fact_path
        setup_task.set_loader(self._play._loader)
        # short circuit fact gathering if the entire playbook is conditional
        if self._play._included_conditional is not None:
            setup_task.when = self._play._included_conditional[:]
        setup_block.block = [setup_task]

        setup_block = setup_block.filter_tagged_tasks(play_context, all_vars)
        self._blocks.append(setup_block)
        self.cache_block_tasks(setup_block)

        for block in self._play.compile():
            new_block = block.filter_tagged_tasks(play_context, all_vars)
            if new_block.has_tasks():
                self.cache_block_tasks(new_block)
                self._blocks.append(new_block)

        for handler_block in self._play.handlers:
            self.cache_block_tasks(handler_block)

        self._host_states = {}
        start_at_matched = False
        batch = inventory.get_hosts(self._play.hosts)
        self.batch_size = len(batch)
        for host in batch:
            self._host_states[host.name] = HostState(blocks=self._blocks)
            # if we're looking to start at a specific task, iterate through
            # the tasks for this host until we find the specified task
            if play_context.start_at_task is not None and not start_at_done:
                while True:
                    (s, task) = self.get_next_task_for_host(host, peek=True)
                    if s.run_state == self.ITERATING_COMPLETE:
                        break
                    if task.name == play_context.start_at_task or fnmatch.fnmatch(task.name, play_context.start_at_task) or \
                            task.get_name() == play_context.start_at_task or fnmatch.fnmatch(task.get_name(), play_context.start_at_task):
                        start_at_matched = True
                        break
                    else:
                        self.get_next_task_for_host(host)

                # finally, reset the host's state to ITERATING_SETUP
                if start_at_matched:
                    self._host_states[host.name].did_start_at_task = True
                    self._host_states[host.name].run_state = self.ITERATING_SETUP

        if start_at_matched:
            # we have our match, so clear the start_at_task field on the
            # play context to flag that we've started at a task (and future
            # plays won't try to advance)
            play_context.start_at_task = None

    def get_host_state(self, host):
        # Since we're using the PlayIterator to carry forward failed hosts,
        # in the event that a previous host was not in the current inventory
        # we create a stub state for it now
        if host.name not in self._host_states:
            self._host_states[host.name] = HostState(blocks=[])

        return self._host_states[host.name].copy()

    def cache_block_tasks(self, block):
        # now a noop, we've changed the way we do caching and finding of
        # original task entries, but just in case any 3rd party strategies
        # are using this we're leaving it here for now
        return
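    # Illustrative usage note (added commentary, not part of the original
    # source): a strategy drives this class roughly as
    #
    #     state, task = iterator.get_next_task_for_host(host)             # advances
    #     state, task = iterator.get_next_task_for_host(host, peek=True)  # read-only
    #
    # peek=True returns what *would* run next without persisting the advanced
    # state back into _host_states.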
    def get_next_task_for_host(self, host, peek=False):

        display.debug("getting the next task for host %s" % host.name)
        s = self.get_host_state(host)

        task = None
        if s.run_state == self.ITERATING_COMPLETE:
            display.debug("host %s is done iterating, returning" % host.name)
            return (s, None)

        (s, task) = self._get_next_task_from_state(s, host=host, peek=peek)

        if not peek:
            self._host_states[host.name] = s

        display.debug("done getting next task for host %s" % host.name)
        display.debug(" ^ task is: %s" % task)
        display.debug(" ^ state is: %s" % s)
        return (s, task)

    def _get_next_task_from_state(self, state, host, peek, in_child=False):

        task = None

        # try and find the next task, given the current state.
        while True:
            # try to get the current block from the list of blocks, and
            # if we run past the end of the list we know we're done with
            # this block
            try:
                block = state._blocks[state.cur_block]
            except IndexError:
                state.run_state = self.ITERATING_COMPLETE
                return (state, None)

            if state.run_state == self.ITERATING_SETUP:
                # First, we check to see if we were pending setup. If not, this is
                # the first trip through ITERATING_SETUP, so we set the pending_setup
                # flag and try to determine if we do in fact want to gather facts for
                # the specified host.
                if not state.pending_setup:
                    state.pending_setup = True

                    # Gather facts if the default is 'smart' and we have not yet
                    # done it for this host; or if 'explicit' and the play sets
                    # gather_facts to True; or if 'implicit' and the play does
                    # NOT explicitly set gather_facts to False.
                    gathering = C.DEFAULT_GATHERING
                    implied = self._play.gather_facts is None or boolean(self._play.gather_facts, strict=False)

                    if (gathering == 'implicit' and implied) or \
                            (gathering == 'explicit' and boolean(self._play.gather_facts, strict=False)) or \
                            (gathering == 'smart' and implied and not (self._variable_manager._fact_cache.get(host.name, {}).get('module_setup', False))):
                        # The setup block is always self._blocks[0], as we inject it
                        # during the play compilation in __init__ above.
                        setup_block = self._blocks[0]
                        if setup_block.has_tasks() and len(setup_block.block) > 0:
                            task = setup_block.block[0]
                else:
                    # This is the second trip through ITERATING_SETUP, so we clear
                    # the flag and move onto the next block in the list while setting
                    # the run state to ITERATING_TASKS
                    state.pending_setup = False

                    state.run_state = self.ITERATING_TASKS
                    if not state.did_start_at_task:
                        state.cur_block += 1
                        state.cur_regular_task = 0
                        state.cur_rescue_task = 0
                        state.cur_always_task = 0
                        state.child_state = None

            elif state.run_state == self.ITERATING_TASKS:
                # clear the pending setup flag, since we're past that and it didn't fail
                if state.pending_setup:
                    state.pending_setup = False

                # First, we check for a child task state that is not failed, and if we
                # have one recurse into it for the next task. If we're done with the child
                # state, we clear it and drop back to getting the next task from the list.
                if state.tasks_child_state:
                    (state.tasks_child_state, task) = self._get_next_task_from_state(state.tasks_child_state, host=host, peek=peek, in_child=True)
                    if self._check_failed_state(state.tasks_child_state):
                        # failed child state, so clear it and move into the rescue portion
                        state.tasks_child_state = None
                        self._set_failed_state(state)
                    else:
                        # get the next task recursively
                        if task is None or state.tasks_child_state.run_state == self.ITERATING_COMPLETE:
                            # we're done with the child state, so clear it and continue
                            # back to the top of the loop to get the next task
                            state.tasks_child_state = None
                            continue
                else:
                    # First here, we check to see if we've failed anywhere down the chain
                    # of states we have, and if so we move onto the rescue portion. Otherwise,
                    # we check to see if we've moved past the end of the list of tasks. If so,
                    # we move into the always portion of the block, otherwise we get the next
                    # task from the list.
                    if self._check_failed_state(state):
                        state.run_state = self.ITERATING_RESCUE
                    elif state.cur_regular_task >= len(block.block):
                        state.run_state = self.ITERATING_ALWAYS
                    else:
                        task = block.block[state.cur_regular_task]
                        # if the current task is actually a child block, create a child
                        # state for us to recurse into on the next pass
                        if isinstance(task, Block) or state.tasks_child_state is not None:
                            state.tasks_child_state = HostState(blocks=[task])
                            state.tasks_child_state.run_state = self.ITERATING_TASKS
                            # since we've created the child state, clear the task
                            # so we can pick up the child state on the next pass
                            task = None
                        state.cur_regular_task += 1

            elif state.run_state == self.ITERATING_RESCUE:
                # The process here is identical to ITERATING_TASKS, except instead
                # we move into the always portion of the block.
                if host.name in self._play._removed_hosts:
                    self._play._removed_hosts.remove(host.name)

                if state.rescue_child_state:
                    (state.rescue_child_state, task) = self._get_next_task_from_state(state.rescue_child_state, host=host, peek=peek, in_child=True)
                    if self._check_failed_state(state.rescue_child_state):
                        state.rescue_child_state = None
                        self._set_failed_state(state)
                    else:
                        if task is None or state.rescue_child_state.run_state == self.ITERATING_COMPLETE:
                            state.rescue_child_state = None
                            continue
                else:
                    if state.fail_state & self.FAILED_RESCUE == self.FAILED_RESCUE:
                        state.run_state = self.ITERATING_ALWAYS
                    elif state.cur_rescue_task >= len(block.rescue):
                        if len(block.rescue) > 0:
                            state.fail_state = self.FAILED_NONE
                        state.run_state = self.ITERATING_ALWAYS
                        state.did_rescue = True
                    else:
                        task = block.rescue[state.cur_rescue_task]
                        if isinstance(task, Block) or state.rescue_child_state is not None:
                            state.rescue_child_state = HostState(blocks=[task])
                            state.rescue_child_state.run_state = self.ITERATING_TASKS
                            task = None
                        state.cur_rescue_task += 1

            elif state.run_state == self.ITERATING_ALWAYS:
                # And again, the process here is identical to ITERATING_TASKS, except
                # instead we either move onto the next block in the list, or we set the
                # run state to ITERATING_COMPLETE in the event of any errors, or when we
                # have hit the end of the list of blocks.
                if state.always_child_state:
                    (state.always_child_state, task) = self._get_next_task_from_state(state.always_child_state, host=host, peek=peek, in_child=True)
                    if self._check_failed_state(state.always_child_state):
                        state.always_child_state = None
                        self._set_failed_state(state)
                    else:
                        if task is None or state.always_child_state.run_state == self.ITERATING_COMPLETE:
                            state.always_child_state = None
                            continue
                else:
                    if state.cur_always_task >= len(block.always):
                        if state.fail_state != self.FAILED_NONE:
                            state.run_state = self.ITERATING_COMPLETE
                        else:
                            state.cur_block += 1
                            state.cur_regular_task = 0
                            state.cur_rescue_task = 0
                            state.cur_always_task = 0
                            state.run_state = self.ITERATING_TASKS
                            state.tasks_child_state = None
                            state.rescue_child_state = None
                            state.always_child_state = None
                            state.did_rescue = False

                            # we're advancing blocks, so if this was an end-of-role block we
                            # mark the current role complete
                            if block._eor and host.name in block._role._had_task_run and not in_child and not peek:
                                block._role._completed[host.name] = True
                    else:
                        task = block.always[state.cur_always_task]
                        if isinstance(task, Block) or state.always_child_state is not None:
                            state.always_child_state = HostState(blocks=[task])
                            state.always_child_state.run_state = self.ITERATING_TASKS
                            task = None
                        state.cur_always_task += 1

            elif state.run_state == self.ITERATING_COMPLETE:
                return (state, None)

            # if something above set the task, break out of the loop now
            if task:
                break

        return (state, task)

    def _set_failed_state(self, state):
        if state.run_state == self.ITERATING_SETUP:
            state.fail_state |= self.FAILED_SETUP
            state.run_state = self.ITERATING_COMPLETE
        elif state.run_state == self.ITERATING_TASKS:
            if state.tasks_child_state is not None:
                state.tasks_child_state = self._set_failed_state(state.tasks_child_state)
            else:
                state.fail_state |= self.FAILED_TASKS
                if state._blocks[state.cur_block].rescue:
                    state.run_state = self.ITERATING_RESCUE
                elif state._blocks[state.cur_block].always:
                    state.run_state = self.ITERATING_ALWAYS
                else:
                    state.run_state = self.ITERATING_COMPLETE
        elif state.run_state == self.ITERATING_RESCUE:
            if state.rescue_child_state is not None:
                state.rescue_child_state = self._set_failed_state(state.rescue_child_state)
            else:
                state.fail_state |= self.FAILED_RESCUE
                if state._blocks[state.cur_block].always:
                    state.run_state = self.ITERATING_ALWAYS
                else:
                    state.run_state = self.ITERATING_COMPLETE
        elif state.run_state == self.ITERATING_ALWAYS:
            if state.always_child_state is not None:
                state.always_child_state = self._set_failed_state(state.always_child_state)
            else:
                state.fail_state |= self.FAILED_ALWAYS
                state.run_state = self.ITERATING_COMPLETE
        return state

    def mark_host_failed(self, host):
        s = self.get_host_state(host)
        display.debug("marking host %s failed, current state: %s" % (host, s))
        s = self._set_failed_state(s)
        display.debug("^ failed state is now: %s" % s)
        self._host_states[host.name] = s
        self._play._removed_hosts.append(host.name)

    def get_failed_hosts(self):
        return dict((host, True) for (host, state) in iteritems(self._host_states) if self._check_failed_state(state))
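    # Illustrative note (added commentary, not part of the original source):
    # _check_failed_state() below answers "is this host really failed?" --
    # a raised fail_state flag alone is not enough, because a block with a
    # rescue section that has not itself failed (or that already rescued,
    # did_rescue) masks the failure.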
    def _check_failed_state(self, state):
        if state is None:
            return False
        elif state.run_state == self.ITERATING_RESCUE and self._check_failed_state(state.rescue_child_state):
            return True
        elif state.run_state == self.ITERATING_ALWAYS and self._check_failed_state(state.always_child_state):
            return True
        elif state.fail_state != self.FAILED_NONE:
            if state.run_state == self.ITERATING_RESCUE and state.fail_state & self.FAILED_RESCUE == 0:
                return False
            elif state.run_state == self.ITERATING_ALWAYS and state.fail_state & self.FAILED_ALWAYS == 0:
                return False
            else:
                return not state.did_rescue
        elif state.run_state == self.ITERATING_TASKS and self._check_failed_state(state.tasks_child_state):
            cur_block = self._blocks[state.cur_block]
            if len(cur_block.rescue) > 0 and state.fail_state & self.FAILED_RESCUE == 0:
                return False
            else:
                return True
        return False

    def is_failed(self, host):
        s = self.get_host_state(host)
        return self._check_failed_state(s)

    def get_original_task(self, host, task):
        # now a noop because we've changed the way we do caching
        return (None, None)

    def _insert_tasks_into_state(self, state, task_list):
        # if we've failed at all, or if the task list is empty, just return the current state
        if state.fail_state != self.FAILED_NONE and state.run_state not in (self.ITERATING_RESCUE, self.ITERATING_ALWAYS) or not task_list:
            return state

        if state.run_state == self.ITERATING_TASKS:
            if state.tasks_child_state:
                state.tasks_child_state = self._insert_tasks_into_state(state.tasks_child_state, task_list)
            else:
                target_block = state._blocks[state.cur_block].copy(exclude_parent=True)
                before = target_block.block[:state.cur_regular_task]
                after = target_block.block[state.cur_regular_task:]
                target_block.block = before + task_list + after
                state._blocks[state.cur_block] = target_block
        elif state.run_state == self.ITERATING_RESCUE:
            if state.rescue_child_state:
                state.rescue_child_state = self._insert_tasks_into_state(state.rescue_child_state, task_list)
            else:
                target_block = state._blocks[state.cur_block].copy(exclude_parent=True)
                before = target_block.rescue[:state.cur_rescue_task]
                after = target_block.rescue[state.cur_rescue_task:]
                target_block.rescue = before + task_list + after
                state._blocks[state.cur_block] = target_block
        elif state.run_state == self.ITERATING_ALWAYS:
            if state.always_child_state:
                state.always_child_state = self._insert_tasks_into_state(state.always_child_state, task_list)
            else:
                target_block = state._blocks[state.cur_block].copy(exclude_parent=True)
                before = target_block.always[:state.cur_always_task]
                after = target_block.always[state.cur_always_task:]
                target_block.always = before + task_list + after
                state._blocks[state.cur_block] = target_block

        return state

    def add_tasks(self, host, task_list):
        self._host_states[host.name] = self._insert_tasks_into_state(self.get_host_state(host), task_list)
ansible-2.5.1/lib/ansible/executor/playbook_executor.py0000644000000000000000000003165713265756155023246 0ustar rootroot00000000000000# (c) 2012-2014, Michael DeHaan
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
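# Added context (not part of the original source): PlaybookExecutor is the
# top-level driver -- it loads each playbook, templates each play, prompts
# for vars_prompt variables, then hands the work to a TaskQueueManager
# (created below), unless we are only listing hosts/tasks/tags or checking
# syntax.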
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import os

from ansible import constants as C
from ansible.executor.task_queue_manager import TaskQueueManager
from ansible.module_utils._text import to_native, to_text
from ansible.playbook import Playbook
from ansible.template import Templar
from ansible.utils.helpers import pct_to_int
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.utils.path import makedirs_safe
from ansible.utils.ssh_functions import check_for_controlpersist

try:
    from __main__ import display
except ImportError:
    from ansible.utils.display import Display
    display = Display()


class PlaybookExecutor:

    '''
    This is the primary class for executing playbooks, and thus the
    basis for bin/ansible-playbook operation.
    '''

    def __init__(self, playbooks, inventory, variable_manager, loader, options, passwords):
        self._playbooks = playbooks
        self._inventory = inventory
        self._variable_manager = variable_manager
        self._loader = loader
        self._options = options
        self.passwords = passwords
        self._unreachable_hosts = dict()

        if options.listhosts or options.listtasks or options.listtags or options.syntax:
            self._tqm = None
        else:
            self._tqm = TaskQueueManager(inventory=inventory, variable_manager=variable_manager, loader=loader, options=options, passwords=self.passwords)

        # Note: We run this here to cache whether the default ansible ssh
        # executable supports control persist.  Sometime in the future we may
        # need to enhance this to check that ansible_ssh_executable specified
        # in inventory is also cached.  We can't do this caching at the point
        # where it is used (in task_executor) because that is post-fork and
        # therefore would be discarded after every task.
        check_for_controlpersist(C.ANSIBLE_SSH_EXECUTABLE)

    def run(self):
        '''
        Run the given playbook, based on the settings in the play which
        may limit the runs to serialized groups, etc.
        '''
        result = 0
        entrylist = []
        entry = {}
        try:
            for playbook_path in self._playbooks:
                pb = Playbook.load(playbook_path, variable_manager=self._variable_manager, loader=self._loader)
                # FIXME: move out of inventory
                self._inventory.set_playbook_basedir(os.path.realpath(os.path.dirname(playbook_path)))

                if self._tqm is None:  # we are doing a listing
                    entry = {'playbook': playbook_path}
                    entry['plays'] = []
                else:
                    # make sure the tqm has callbacks loaded
                    self._tqm.load_callbacks()
                    self._tqm.send_callback('v2_playbook_on_start', pb)

                i = 1
                plays = pb.get_plays()
                display.vv(u'%d plays in %s' % (len(plays), to_text(playbook_path)))

                for play in plays:
                    if play._included_path is not None:
                        self._loader.set_basedir(play._included_path)
                    else:
                        self._loader.set_basedir(pb._basedir)

                    # clear any filters which may have been applied to the inventory
                    self._inventory.remove_restriction()

                    # Create a temporary copy of the play here, so we can run post_validate
                    # on it without the templating changes affecting the original object.
                    # Doing this before vars_prompt to allow for using variables in prompt.
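                    # Illustrative example (added commentary, not part of the
                    # original source): the vars_prompt handling below consumes
                    # play entries such as
                    #
                    #     vars_prompt:
                    #       - name: release_version
                    #         prompt: "Release version"
                    #         private: no
                    #         default: "1.0"
                    #
                    # unless the variable was already supplied via extra vars,
                    # in which case the prompt is skipped.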
                    all_vars = self._variable_manager.get_vars(play=play)
                    templar = Templar(loader=self._loader, variables=all_vars)

                    new_play = play.copy()
                    new_play.post_validate(templar)

                    if play.vars_prompt:
                        for var in new_play.vars_prompt:
                            vname = var['name']
                            prompt = var.get("prompt", vname)
                            default = var.get("default", None)
                            private = boolean(var.get("private", True))
                            confirm = boolean(var.get("confirm", False))
                            encrypt = var.get("encrypt", None)
                            salt_size = var.get("salt_size", None)
                            salt = var.get("salt", None)

                            if vname not in self._variable_manager.extra_vars:
                                if self._tqm:
                                    self._tqm.send_callback('v2_playbook_on_vars_prompt', vname, private, prompt, encrypt, confirm, salt_size, salt, default)
                                    play.vars[vname] = display.do_var_prompt(vname, private, prompt, encrypt, confirm, salt_size, salt, default)
                                else:  # we are either in --list-